diff --git a/.gitattributes b/.gitattributes index a0a6038f1c1cb03117e75de8bf77d801b550bc63..5bac703b1e24592ac1e72e0b196f37d2cc86d1cc 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1156,3 +1156,11 @@ data/2025/2504_10xxx/2504.10957/aee32c72-0906-4851-a50f-6b02b7f21eea_origin.pdf data/2025/2504_11xxx/2504.11054/4145d5b1-8b48-4617-bddf-807b21a8d9a6_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_11xxx/2504.11171/b768317e-61d3-4f19-a242-b9cdc2cab557_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_11xxx/2504.11346/58cb6b1b-7ad5-4619-9d3e-81f1c5a39bc2_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10479/71273ce6-5170-4939-8354-af535b974810_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10481/29785fca-1f46-4ab1-92e1-b0b4c9aee15b_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10483/9b7bb575-f36f-48cf-a562-8fb8239c8a45_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10685/2b7c0cf2-f712-45f3-86c5-afe1fcf3d48b_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10686/02e14e26-d981-43b7-bd68-0bb6d5c44d72_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10861/a4e0028e-483e-4013-a80e-4d616bb12d80_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_11xxx/2504.11491/0400dc9e-bb51-4dac-9ac6-e38f3b9731ae_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_content_list.json b/data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..dd7c44df52c56f5e43d1a6316275a7d44f330def --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_content_list.json @@ -0,0 +1,5405 @@ +[ + 
{ + "type": "text", + "text": "Weight Ensembling Improves Reasoning in Language Models", + "text_level": 1, + "bbox": [ + 111, + 95, + 656, + 150 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xingyu Dang\\*,1 Christina Baek\\*,2 Kaiyue Wen3 Zico Kolter2 Aditi Raghunathan2", + "bbox": [ + 111, + 156, + 839, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Tsinghua University $^{2}$ Carnegie Mellon University $^{3}$ Stanford University", + "bbox": [ + 112, + 178, + 691, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$\\text{品}$ dangxy20@mails.tsinghua.edu.cn,kbaek@andrew.cmu.edu", + "bbox": [ + 111, + 208, + 565, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 457, + 260, + 540, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We investigate a failure mode that arises during the training of reasoning models, where the diversity of generations begins to collapse, leading to suboptimal test-time scaling. Notably, the Pass@1 rate reliably improves during supervised finetuning (SFT), but Pass@k rapidly deteriorates. Surprisingly, a simple intervention of interpolating the weights of the latest SFT checkpoint with an early checkpoint, otherwise known as WiSE-FT, almost completely recovers Pass@k while also improving Pass@1. The WiSE-FT variant achieves better test-time scaling (Best@k, majority vote) and achieves superior results with less data when tuned further by reinforcement learning. Finally, we find that WiSE-FT provides complementary performance gains that cannot be achieved only through diversity-inducing decoding strategies, like temperature scaling. We formalize a bias-variance tradeoff of Pass@k with respect to the expectation and variance of Pass@1 over the test distribution. 
We find that WiSE-FT can reduce bias and variance simultaneously, while temperature scaling inherently trades off between bias and variance.", + "bbox": [ + 169, + 297, + 831, + 494 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 112, + 527, + 290, + 545 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances in large language models (LLMs) have showcased their remarkable ability to perform complex reasoning, yet these successes often hinge on test-time scaling strategies (Lightman et al., 2023; Snell et al., 2024; Wu et al., 2024). In many applications, such as math problems, puzzles, and logical reasoning, LLMs employ a verification framework where it is significantly easier for the model to verify a candidate solution than to generate one from scratch. This distinction has given rise to strategies that sample multiple \"reasoning traces\" or sequences of reasoning steps during inference, selecting the best final guess through an outcome reward model (ORM) or majority vote. In this setting, an upper bound on the performance a model could achieve is measured by Pass@K, or the probability that at least one out of $K$ independently sampled reasoning traces is correct.", + "bbox": [ + 109, + 569, + 887, + 708 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Unfortunately, while the standard training pipeline of supervised finetuning (SFT) followed by reinforcement learning (RL) dependably improves Pass@1 for reasoning, Pass@K tends to drop early into finetuning (Cobbe et al., 2021; Chow et al., 2024a; Chen et al., 2025). This mismatch arises from a symptom of finetuning called diversity collapse, where overtuned models yield less diverse generations. This is detrimental to Pass@K since the model wastes $K$ attempts on only a handful of guesses. In fact, by analyzing the model's error rate i.e., 1 - Pass@1, across the test distribution, we derive a Pass@K bias-variance trade-off. 
To improve expected test Pass@K, one can either reduce the bias which is the expected error rate or how much the model's error rate varies across problems. The latter term is connected to diversity - more diversity allows models to hedge and do uniformly well across all test questions. In particular, during SFT, Pass@1 improves (bias ↓) at the cost of diversity collapse (variance ↑).", + "bbox": [ + 109, + 712, + 885, + 867 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Surprisingly, common ways of alleviating diversity collapse, such as early stopping at peak Pass@K or decoding with high temperature, suffer from the reverse trade-off: diversity improves (variance $\\downarrow$ ) at the cost of overall Pass@1 degrading (bias $\\uparrow$ ). Consequently, in this paper we are concerned with a central question:", + "bbox": [ + 109, + 871, + 885, + 920 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 434, + 47 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10478v4 [cs.LG] 7 Oct 2025", + "bbox": [ + 22, + 284, + 58, + 715 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 943, + 504, + 954 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9a00ddd660bf8f8eeda9cd85892cb8a7e3465e904ab2b4e6cf073e2f5f617379.jpg", + "image_caption": [ + "Figure 1: Pass@k of WiSE-FT versus SFT on GSM8k Gemma-2-2B supervised finetuned and evaluated on GSM8k. At each SFT timestep $t$ , we evaluate Pass@k of checkpoint $w_{t}$ (in dashed) with its WiSE-FT variant $1/2 \\cdot w_{t} + 1/2 \\cdot w_{0}$ (in solid), where traces are independently sampled with temperature $T = [0.7, 1.0, 1.3, 1.6]$ ." 
+ ], + "image_footnote": [ + "--- SFT T=0.7 --- SFT T=1.0 WiSE-FT T=1.0 SFT T=1.3 SFT T=1.6" + ], + "bbox": [ + 130, + 97, + 367, + 284 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/3536f4b1df50cd66ddd3bfbccb80c35fc10834c00925c31728553b31b2fbfd2a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 372, + 98, + 617, + 285 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/19a01a99597df80ac8df614f4c1787a0a5b99ab4663cc34196f872471af91463.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 622, + 98, + 867, + 284 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Is it possible to simultaneously improve both Pass@1 and Pass@K, thereby overcoming the bias-variance tradeoff inherent in current approaches?", + "bbox": [ + 122, + 412, + 875, + 446 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In our work, we introduce a simple, scalable and effective intervention that allows models to achieve both high Pass@K and Pass@1 across mathematical reasoning tasks GSM8k, MATH, and AIME. The specific technique we use is a variant of WiSE-FT (Wortsman et al., 2022) where we interpolate the weights of the latest SFT checkpoint $\\boldsymbol{w}_t$ with an early checkpoint $w_0$ as $\\boldsymbol{w}_{\\mathrm{WiSE}(t)} = \\frac{1}{2} \\cdot \\boldsymbol{w}_0 + \\frac{1}{2} \\cdot \\boldsymbol{w}_t$ . Our key finding is that WiSE-FT successfully merges the diverse sampling capabilities of earlier checkpoints while retaining or surpassing the Pass@1 of later checkpoints. In Figure 1, we observe that the WiSE-FT model achieves both higher Pass@K and Pass@1 with more SFT steps $t$ , unlike naive SFT which suffers from an early decay in Pass@K. 
Moreover, the gains with WiSE-FT is unachievable by early-stopping or diversity-aware decoding alone.", + "bbox": [ + 109, + 455, + 883, + 594 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Thus, we propose a new paradigm of training reasoning models: 1.) Train extensively using SFT as long as Pass@1 improves, 2.) Perform WiSE-FT with an earlier SFT checkpoint, 3.) Continue tuning the WiSE-FT variant using RL. Overall, the WiSE-FT model has the following immediate practical benefits:", + "bbox": [ + 109, + 599, + 883, + 647 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Better Test-Time Scaling Across all datasets and base models, the WiSE-FT variant achieves the highest performance with test-time scaling (Majority Vote, ORM) compared to an overtrained SFT model paired with diversity-aware decoding.", + "- Better Reinforcement Learning Since RL uses self-generated data to tune models, to generalize reliably, it is important for generations to provide sufficient learning signal while also having high coverage over the data space. We find that continued RL training starting from WiSE-FT weights achieves superior results with less synthetic data compared to initializing RL from the last SFT checkpoint and even early-stopped SFT." + ], + "bbox": [ + 122, + 657, + 879, + 782 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, we provide a comprehensive analysis of how reasoning models suffer from diversity collapse during SFT and its negative downstream impact during RL and test-time scaling. We first discuss our WiSE-FT findings in §4. Motivated by this discovery, we investigate two fundamental questions. First, we investigate diversity collapse during SFT and RL of reasoning models in §5. Diversity collapse not only impacts the model's ability to attempt different guesses. 
In fact, we make an even stronger observation - the generations of reasoning models converge towards a single reasoning trace for each test question. We theoretically prove that standard RL algorithms (i.e., REINFORCE and GRPO) fail to recover lost diversity in a simplified discrete bandit setting.", + "bbox": [ + 109, + 796, + 883, + 919 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 941, + 504, + 954 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Second, we formalize the competing goals of Pass@1 and Pass@K as a bias-variance trade-off in §6. We empirically measure and compare the bias and variance of WiSE-FT versus early-stopping versus high temperature decoding. Notably, only WiSE-FT reduces both bias and variance. We conclude with a remark on the limitations of decoding strategies such as top-k (Shao et al., 2017), nucleus (Holtzman et al., 2020), and min-p (Nguyen et al., 2024), at eliciting the maximum capabilities with test-time scaling from current reasoning models.", + "bbox": [ + 109, + 95, + 883, + 189 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Works", + "text_level": 1, + "bbox": [ + 112, + 203, + 310, + 222 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Diversity collapse with SFT: The standard pipeline for enhancing reasoning in LLMs involves an initial phase of supervised fine-tuning (SFT) followed by reinforcement learning (RL) (Guo et al., 2025; Setlur et al., 2024). SFT is critical for instilling interpretable and readable reasoning chains and ensuring that the model adheres to a consistent rollout templates (Guo et al., 2025). However, a number of recent works have identified critical pitfalls of SFT that hinders the model's ability to explore and ultimately it's overall problem solving ability. 
Notably, Cobbe et al. (2021) observe diversity collapse when finetuning on GSM8k training dataset, during which the Pass@1 continuously improves whereas Pass@k starts to fall shortly into the training. Similar diversity collapse phenomenon also exists in the self-improvement setting with SFT (Song et al., 2024), and is theoretically investigated as the sharpening effect (Huang et al., 2024). This is not desirable as diverse sampling at inference is important for test-time scaling using majority voting (Wang et al., 2023) or reward model guided search (Setlur et al., 2024; Beeching et al., 2024). Yeo et al. (2025); Chu et al. (2025) attribute this behavior to overfitting, memorization of samples and overfixation to a template style leading to reduced generalization. In our work, we corroborate similar findings and propose ensembling over the course of SFT as a mitigation strategy.", + "bbox": [ + 109, + 239, + 883, + 454 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Mitigating diversity collapse: Given the importance of diversity for effectively scaling inference-time compute, several recent works have proposed auxiliary finetuning objectives and decoding strategies to mitigate diversity collapse. Li et al. (2025) regularize the SFT process using a game-theoretic framework that encourages sparse updates, thereby preserving output diversity. Zhang et al. (2024b) directly optimizes for diversity during finetuning. Other approaches modify the finetuning procedure to directly optimize for Best-of-N sampling at inference time (Chow et al., 2024b; Sessa et al., 2024; Chen et al., 2025). Another line of work focuses on inference-time decoding, explicitly encouraging diverse solutions through modified beam search strategies (Vijayakumar et al., 2018; Olausson et al., 2024; Chen et al., 2024; Beeching et al., 2024). Li et al. (2023) improve diversity during parallel decoding by appending curated prompts to the input. 
In formal reasoning settings e.g., Lean, methods such as Monte Carlo tree search have been used to diversify intermediate reasoning steps, as demonstrated in AlphaProof (AlphaProof and AlphaGeometry teams, 2024). In this work, we identify a simple and complementary intervention during the finetuning process to maintain the diversity of generations. We especially care about enforcing diversity while preserving the overall accuracy of generations.", + "bbox": [ + 109, + 476, + 883, + 690 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Preliminaries and Experimental Setup", + "text_level": 1, + "bbox": [ + 112, + 704, + 563, + 724 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Pass@k, Best@k, and Majority Vote", + "text_level": 1, + "bbox": [ + 112, + 742, + 472, + 760 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given a reasoning model $f(\\cdot)$ , a decoding strategy $D$ , and problem $x$ , the model's solution is obtained by sampling a reasoning trace $r := [x, s^{(1)}, s^{(2)}, \\dots, s^{(n)}, \\hat{y}]$ consisting of a sequence of intermediate steps $s^{(i)}$ and a final guess $\\hat{y}$ . Given $k$ independently sampled traces, Pass@K measures the probability that at least one guess matches the true answer $y$ :", + "bbox": [ + 109, + 771, + 883, + 835 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {P a s s} @ \\mathrm {K} (x) = \\mathbb {E} _ {[ \\boldsymbol {r} _ {i} ] _ {i = 1} ^ {k} \\sim D (f (x))} [ \\mathbb {1} \\{\\exists i \\in [ k ] \\text {s . t .} \\hat {y} _ {i} = y \\} ] = 1 - (1 - \\rho_ {x}) ^ {K} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 225, + 840, + 885, + 864 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\rho_{x} = P(\\hat{y} = y\\mid x,f,D)$ is the Pass@1 or marginal probability of sampling the ground truth answer. Then $(1 - \\rho_x)^K$ is the probability that all $K$ guesses are incorrect. 
We will refer to Pass@1 as $\\rho_{x}$ interchangeably in our paper.", + "bbox": [ + 109, + 869, + 883, + 920 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 431, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 941, + 504, + 954 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In practice, test-time compute is scaled by selecting one of $K$ guesses either by a output reward model (ORM) or Majority Vote. Then we can measure Best@K as", + "bbox": [ + 109, + 95, + 883, + 127 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {B e s t} @ \\mathrm {K} (x) = \\mathbb {E} _ {[ \\boldsymbol {r} _ {i} ] _ {i = 1} ^ {k} \\sim D (f (x))} [ \\hat {y} _ {i ^ {*}} = y ] \\text {w h e r e} i ^ {*} = \\arg \\max _ {i \\in [ K ]} \\sum_ {j = 1} ^ {K} \\mathbb {1} \\left\\{\\hat {y} _ {i} = \\hat {y} _ {j} \\right\\} \\text {o r} \\operatorname {O R M} (\\boldsymbol {r} _ {i})\n$$\n", + "text_format": "latex", + "bbox": [ + 165, + 138, + 830, + 184 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Notably, Pass@K is equivalent to Best@K using a perfect ORM verifier. As we will observe, WiSE-FT achieves both higher Pass@1 and Pass@K and this directly translates to achieving better Best@K with an ORM verifier and by Majority Vote.", + "bbox": [ + 109, + 191, + 883, + 239 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Weight-Space Ensembling (WiSE-FT)", + "text_level": 1, + "bbox": [ + 112, + 253, + 478, + 272 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "WiSE-FT is a weight-space ensembling technique proposed by Wortzman et al. (2022) to improve the out-of-distribution accuracy of finetuned models at no extra computational cost. 
In particular, while models tend to achieve better in-distribution performance after finetuning, they tend to be less robust to distribution shift. Surprisingly, by simply interpolating the weights of the finetuned model $\\boldsymbol{w}_t$ with the pretrained weights $\\boldsymbol{w}_0$", + "bbox": [ + 109, + 284, + 883, + 359 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {w} _ {\\mathrm {W i S E} (t)} = \\delta \\cdot \\boldsymbol {w} _ {0} + (1 - \\delta) \\cdot \\boldsymbol {w} _ {t} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 374, + 369, + 883, + 390 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "WiSE-FT can achieve best of both words: the out-of-distribution accuracy of models improves without incurring a drop in in-distribution accuracy. Similar to this philosophy, we apply weight ensembling to achieve both the diverse generation ability of early SFT checkpoints while maintaining the high Pass@1 accuracy of later SFT checkpoints.", + "bbox": [ + 109, + 397, + 883, + 460 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 Training and Evaluation Pipeline", + "text_level": 1, + "bbox": [ + 112, + 474, + 447, + 493 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The majority of our experiments are conducted on Gemma-2-2B and Qwen-2.5-0.5B. We perform SFT on a 30K subset of rephrased augmentations of GSM8k (Cobbe et al., 2021) and MATH (Hendrycks et al., 2021) in MetaMath40k (Yu et al., 2023) for 1710 steps or 10 epochs. We then continue finetuning on another 30K subset of rephrased training questions from MetaMath using Group Relative Policy Optimization (GRPO) with a binary reward of the correctness of the model's final answer. Finally, we evaluate models on GSM8K and MATH500, respectively. To estimate the true Pass@K and Pass@1 marginalized over the distribution of sampled traces, we sample 100 reasoning traces per test example and average over them to estimate Pass@1, i.e. 
$\\rho_{x}$ . Then to calculate Pass@K, we use the theoretical formula $1 - (1 - \\rho_{x})^{K}$ in Equation 1. Unless noted otherwise, we employ a naive decoding strategy with top-p threshold 0.9, temperature $T = 0.8$ , and top-k with $K = 50$ .", + "bbox": [ + 109, + 503, + 883, + 657 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4 Improving Diverse Reasoning Capabilities by WiSE-FT", + "text_level": 1, + "bbox": [ + 109, + 674, + 733, + 696 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We first carefully track Pass@K for $K \\in \\{1, 4, 32\\}$ across the SFT trajectory of Qwen-2.5-0.5B and Gemma-2-2B. Similar to findings from Cobbe et al. (2021); Chen et al. (2025), we observe that Pass@1 continues to improve with longer SFT, whereas for larger $K = 4, 32$ , Pass@K tends to peak much earlier on in training (in Figure 1, 17, and 19). In other words, while later SFT checkpoints achieve higher Pass@1, earlier SFT checkpoint achieve higher Pass@K. This tradeoff in model selection is not ideal downstream for test-time scaling.", + "bbox": [ + 109, + 712, + 883, + 805 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Building upon this intuition, we propose weight ensembling between earlier and later SFT checkpoints. We apply a variant of WiSE-FT where instead of the pretrained model, we interpolate between the earliest SFT checkpoint (in our case, after 1 epoch of training) and the weights of later checkpoint. As shown in Figure 2, we observe a \"sweet spot\" of interpolation coefficients $\\delta \\in (0,1)$ where the WiSE-FT model achieves both higher Pass@1 than the last SFT model and higher Pass@K than the early SFT model. We will fix $\\delta = 1/2$ , which generally performs decently for all of the datasets we've tested. 
In fact, after WiSE-FT $w_{\\mathrm{WiSE}(t)}$ , both Pass@1 and Pass@k grow monotonically with SFT steps $t$ (see Figure 1).", + "bbox": [ + 109, + 811, + 883, + 921 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 431, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 941, + 504, + 954 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/546890eae7b6307835210a35ac4546692989e395b2041aa92121b17022a37557.jpg", + "image_caption": [ + "Figure 2: Pass@1 vs. Pass@K across Interpolation Coefficients We perform WiSEFT with $\\delta \\in [0.1, 0.9]$ between the first and last checkpoints of model (in legend) finetuned on GSM8K, MATH, and OpenThoughts-114K, then evaluate on GSM8K, MATH500, and AIME24, respectively. Early SFT model observe higher Pass@K (y-axis) while later SFT model observes higher Pass@1 (x-axis). The interpolated model observe best of both metrics." + ], + "image_footnote": [], + "bbox": [ + 117, + 97, + 364, + 281 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6c5fa0e260a30825c41c9e0eaa75949a07f90449475e4e78fc1c1a49f05915b4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 377, + 98, + 620, + 282 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/5137cb2f59693ab411059d8208f1a96ff60109df3970efcaaad046b30b11e30c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 638, + 98, + 880, + 282 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Better Test-Time Scaling This boost in both Pass@1 and Pass@K directly translates to better performance with test-time scaling. We measure Best@K by Majority Vote and by selecting the reasoning trace with highest reward using an off-the-shelf ORM RLHFlow/Llama3.1-8B-PRM-Deepseek-Data (Xiong et al., 2024). 
We evaluate the performance of the last SFT checkpoint with highest Pass@1 versus the corresponding WiSE-FT variant with $\\delta = 1/2$ . In Figure 3, we see that the performance gap on MATH500 between the final Gemma-2-2B SFT checkpoint and Wise-FT model widens with larger $K$ . The WiSE-FT model achieves $5 - 7\\%$ better performance with test-time scaling.", + "bbox": [ + 109, + 405, + 883, + 512 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Better RL Scaling WiSE-FT's ability to achieve both high Pass@1 and Pass@K is particularly advantageous for continued RL training where models are further trained by policy gradient methods using self-generated data. In particular, WiSE-FT is able to generate data rich in learning signal (high Pass@1) while still having high coverage over the data space (high Pass@K). We continue training on rephrased training questions of GSM8K and MATH using GRPO paired with a binary reward of the correctness of the final guess. Across runs, we observe that continued RL training starting from the final WiSE-FT model improves performance more stably than finetuning starting from the final SFT checkpoint. Notably the final SFT checkpoint suffers low coverage over the data space, causing Pass@1 to improve slowly. We also try continued RL training from an earlier SFT checkpoint with peak Pass@4 performance. While RL scales better over the early SFT checkpoint in comparison to the final checkpoint, the performance still remains subpar compared to WiSE-FT.", + "bbox": [ + 109, + 550, + 883, + 717 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 General Purpose Reasoning Models", + "text_level": 1, + "bbox": [ + 112, + 734, + 467, + 753 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "So far we have studied the effect of WiSE-FT on models tuned on reasoning data for the same specific reasoning task (e.g., train on GSM8k and evaluate on GSM8k). 
We've additionally tested how well our findings generalize to models trained on general purpose reasoning datasets and tested on a out-of-distribution reasoning task. We take Qwen2.5-7B-Instruct and SFT for 5 epochs on OpenThoughts-114k, a high-quality synthetic dataset of math, science, and coding questions paired with DeepSeek-R1 completions, then evaluate its performance on AIME24 competition problems (with ASY code for figures from Muennighoff et al. (2025)). In this setting, the Pass@K trends during SFT on is more subtle. We still observe diversity collapse in Figure 12, but the affect is not strong enough for Pass@K to drop back down. However, we observe that the rate at which Pass@K improves for $K \\in \\{16,32\\}$ slows down early while Pass@1 grows at a constant rate (Figure 10). We then perform WiSE-FT between the final and earlier checkpoint with", + "bbox": [ + 109, + 766, + 883, + 919 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 941, + 504, + 954 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/4784db23bc7951ee2fc6656f65f8c2d3d009a5771392f48ebe1e24aa859028e1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 117, + 95, + 336, + 281 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/281b7e3b04727e31eb48c9f9eb0dac923c0ed6c74f21659a0a4d939eab7dbcdc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 97, + 550, + 281 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e3b0978da07922462da538218a0295754fb4a5a2ab33dcf01d466532d3e49fa5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 560, + 99, + 870, + 284 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/38159ef7ae6f51b78b1f51b27ac07d7019b9006e97ddbd5054b02d32b076acac.jpg", + 
"image_caption": [ + "(a)", + "(b)" + ], + "image_footnote": [], + "bbox": [ + 117, + 309, + 552, + 515 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e8032220c0101b093c7abbf09b817da62b4a68f6faf283ab9fe582fdf70c1c5e.jpg", + "image_caption": [ + "(c)", + "Figure 3: Downstream Advantages of WiSE-FT: (a) Best@K on MATH500 of the final SFT Gemma2-2B checkpoint and its WiSE-FT counterpart. (b) Pass@K on AIME24 WiSE-FT after SFT on general purpose reasoning dataset OpenThoughts-114k achieves higher Pass@K on AIME24. (c) RL Scaling Gemma and Qwen SFT checkpoints further tuned by GRPO on GSM8K and MATH, respectively. RL from the final WiSE-FT model achieves higher Pass@1 with less data compared to GRPO starting from both early and late SFT checkpoints." + ], + "image_footnote": [], + "bbox": [ + 563, + 292, + 877, + 512 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "higher diversity. We choose early checkpoint at epoch 3 where improvements in Pass@K begin to slow. Similarly, we observe that WiSE-FT improves both Pass@1 and Pass@K in Figure 2.", + "bbox": [ + 109, + 669, + 883, + 702 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 Diversity Collapse during Finetuning", + "text_level": 1, + "bbox": [ + 112, + 718, + 552, + 739 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In previous sections we alluded to the phenomenon where $\\mathrm{Pass}@\\mathrm{K}$ decreases because SFT and RL induces diversity collapse in reasoning traces. To verify this hypothesis, we sample 100 traces per test GSM8k problem and measure diversity using three metrics:", + "bbox": [ + 109, + 753, + 883, + 801 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Answer Diversity: The fraction of unique guesses $\\hat{y}$ among reasoning traces.", + "2. 
Operation Diversity: The fraction of unique sequence of arithmetic operations performed among reasoning traces (In GSM8k, each intermediate step consists of a basic arithmetic operation, e.g. $5 + 3 = 8$ ).", + "3. Semantic Diversity: The average cosine similarity between the text embeddings of the reasoning traces, computed using Stella-400M-v5 (Zhang et al., 2024a)" + ], + "bbox": [ + 150, + 813, + 885, + 919 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 943, + 504, + 954 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/97906774fe3390f2c8dce9d365178b77721d1991265e137bf86ad95237738532.jpg", + "image_caption": [ + "Figure 4: Diversity Collapse The answer, semantic, and operation diversity of Gemma-2-2B reasoning traces across GSM8k test examples. Colors map to different SFT checkpoints." 
+ ], + "image_footnote": [], + "bbox": [ + 161, + 119, + 343, + 252 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c09374f5d22f90dca1bd1db2225d9246275d5d6c3a44de40384a8f4f0172de11.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 119, + 537, + 252 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6b301ba7c226d983f2678b214edd7278f1cfb4320bcae13b40fbe1456b86da77.jpg", + "image_caption": [ + "Diversity Across SFT $[T = 0.8]$" + ], + "image_footnote": [], + "bbox": [ + 544, + 112, + 723, + 252 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0ca043e2ceef566089e4e9de31db5711daf3366c9e45fb0dd7cea3659355b3d9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 728, + 111, + 828, + 247 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/33b82e197b5263aaec61d52cf001bea59054b3e26d2244a7f955f99bbd538652.jpg", + "image_caption": [ + "Figure 5: Pass@k for SFT and RL of Qwen-2.5-0.5B on GSM8K. The purple solid line measures Pass@K across SFT steps, while the dashed lines correspond to further training different checkpoints by Proximal Policy Optimization (PPO). While Pass@1 continues to improve, Pass@k for larger K can decrease even with RL." + ], + "image_footnote": [], + "bbox": [ + 117, + 323, + 883, + 488 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Figure 4, we observe a stark trend where longer SFT on Gemma-2-2B incrementally suffers from clear diversity collapse across all diversity metrics. 
Specifically, the model places most of its probability mass not only on one particular guess, but on a single reasoning trace, as evidenced by the reduced semantic and operation diversity.", + "bbox": [ + 109, + 590, + 883, + 654 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.1 Theoretical Discussion of Diversity Collapse During SFT and RL", + "text_level": 1, + "bbox": [ + 111, + 667, + 718, + 686 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We assess theoretically why diversity collapse tends to arise during SFT and RL training. Our analysis reveals that while SFT and RL operate on different principles, they share common pathways that lead to reduced generation diversity when optimizing for accuracy.", + "bbox": [ + 109, + 699, + 883, + 747 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Diversity Collapse during SFT Overparameterized models are well-known to exhibit overconfidence in their predictions, an effect that has been studied extensively in classification (Guo et al., 2017). In particular, the model's confidence towards the most likely class $P(\\hat{y} = k_{\\max} \\mid x)$ is often much higher than the model's accuracy. In binary classification with linear models $f(x) = \\sigma(\\langle \\boldsymbol{w}, \\boldsymbol{x} \\rangle)$ and linearly separable training data, gradient descent provably drives the norm of the weights to infinity, causing probabilities to collapse to 0 or 1 (Soudry et al., 2018). We demonstrate this in linear models in Appendix A. 
A similar phenomenon likely arises in large reasoning models, which may also be prone to overfitting during SFT, ultimately leading to overly confident solutions in spite of limited coverage over the space of traces (Cobbe et al., 2021).", + "bbox": [ + 109, + 781, + 883, + 917 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 941, + 503, + 953 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Diversity Collapse during RL We further prove why applying reinforcement learning to a low-diversity policy yields suboptimal results—and sometimes even exacerbates diversity collapse—in a discrete bandit setting (see Figure 5). In this scenario, we assume there exist $K$ equally good arms, corresponding to a set of successful strategies, and one bad arm that the policy should learn to avoid. We show two key results in this setting:", + "bbox": [ + 109, + 95, + 883, + 174 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Implicit Collapse of Policy Diversity without KL Regularization. Our analysis demonstrates that when standard reinforcement learning algorithms—REINFORCE and GRPO—are applied without KL regularization, the training dynamics inevitably lead to a collapse in output diversity. Although multiple arms (actions) are equally optimal, the updates become self-enforcing as training progresses. Once one of the good arms is randomly reinforced, its probability increases at the expense of the others, ultimately driving the policy to converge on a single-arm strategy (Theorem C.1).", + "2. Diversity Does Not Increase with KL Regularization. When KL regularization is incorporated to constrain the divergence from the initial policy in REINFORCE, the final policy no longer collapses into a single-arm strategy. 
However, the diversity of the converged policy cannot exceed the initial diversity. Concretely, we show that the probability distribution over the good arms remains proportional to the initial distribution when the RL algorithm converges (Theorem C.8). This explains why initializing with a diverse policy is critical for the generalization of reinforcement learning." + ], + "bbox": [ + 112, + 194, + 885, + 393 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6 Bias-Variance Tradeoff of Pass@K", + "text_level": 1, + "bbox": [ + 112, + 412, + 517, + 431 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "So far, we saw a mismatch in growth of Pass@1 and Pass@K during SFT and alluded to the impact of diversity collapse to Pass@K. We now formalize the relationship between Pass@1, Pass@K, and diversity collapse. Notably, we show that the upper bound of expected Pass@K over the test distribution can be decomposed into bias and variance quantities.", + "bbox": [ + 109, + 455, + 885, + 518 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.1 Diversity Collapse leads to Bimodal Pass@1 Distribution", + "text_level": 1, + "bbox": [ + 111, + 537, + 653, + 555 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Consider the expected $\mathrm{Pass}@\mathrm{K}$ over the entire test distribution $x, y \sim \mathcal{D}$ . By Jensen's inequality, we can derive a straightforward upper bound of expected $\mathrm{Pass}@\mathrm{K}$ that decomposes into the bias and variance of $1 - \rho_x$ (See proof in Appendix B). Note that the upper bound falls monotonically with larger bias and variance:", + "bbox": [ + 109, + 570, + 885, + 632 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\textbf{Proposition 6.
1.} \mathbb {E} _ {x, y \sim \mathcal {D}} [ \operatorname{Pass} @ \mathrm {K} (x) ] \leq 1 - ((\underbrace {\mathbb {E} _ {x , y \sim \mathcal {D}} [ 1 - \rho_ {x} ]} _ {\text{Bias}}) ^ {2} + \underbrace {\operatorname{Var} (\rho_ {x})} _ {\text{Variance}}) ^ {k / 2}\n$$\n", + "text_format": "latex", + "bbox": [ + 151, + 661, + 751, + 700 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Figure 6b, we plot the distribution of error $1 - \rho_{x}$ , estimated using 100 sampled traces, over GSM8K test examples. We notice two trends with longer SFT. First, bias decreases, i.e., the expected error shifts towards 0. However, the distribution becomes increasingly bimodal with the densities converging towards the two extremes 0 and 1. As a result, the variance increases with longer SFT. This increase in variance directly explains the drop in Pass@k.", + "bbox": [ + 109, + 728, + 883, + 806 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The bimodality of the $1 - \rho_{x}$ distribution means that the Pass@1 of any test problem is either very high or very low. Interestingly, one explanation for the increased bimodality of the distribution of $1 - \rho_{x}$ is in fact when models suffer from diversity collapse. In other words, a particular guess tends to be oversampled for each test problem. If the model places high probability on an incorrect guess, Pass@1 is very low. On the other hand, if the model places high probability on the correct guess, Pass@1 is very high. We illustrate this relationship in Figure 6a. 
All in all, Pass@K can be improved in two ways - either reduce bias by improving Pass@1 or reduce variance by increasing diversity.", + "bbox": [ + 109, + 811, + 885, + 921 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 431, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 941, + 504, + 954 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/105ec9b2f770f3fd0623182efd6b919cba9dbb10c81c24b7a52fedf2e526985a.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 116, + 94, + 885, + 250 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/401631f09a461d1e42f689302a470ca43729d0d0654d63edcc98eccaf75cd6ba.jpg", + "image_caption": [ + "(b)", + "Figure 6: Histogram of error $1 - \\rho_{x}$ of Gemma-2-2B SFT checkpoints across GSM8k test. SFT progressively decreases bias but increases variance of error i.e., $1 - \\mathrm{Pass}@\\mathrm{l}$ , across the test distribution, causing Pass@K to fall. Applying Wise-FT reduces both bias and variance, but temperature scaling trades off decreasing variance with increased bias." + ], + "image_footnote": [], + "bbox": [ + 116, + 271, + 888, + 455 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6.2 WiSE-FT vs. Diverse Decoding", + "text_level": 1, + "bbox": [ + 112, + 577, + 421, + 595 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "While we've proposed inducing diversity by WiSE-FT, another common alternative for inducing diversity is temperature scaling the logits. High temperature smoothens the logits allowing the model to more likely sample low probability tokens. In Figure 1, we see that while high temperatures indeed improve Pass@K, the Pass@K at any SFT timestep notably never reaches the Pass@K of our final WiSE-FT model. 
If temperature scaling also increases diversity, why does WiSE-FT strictly outperform sampling with high temperature? In Figure 6b, we plot the distribution of $1 - \\rho_{x}$ if we sample from the last SFT checkpoint with high temperature $T = 1.5$ . As expected, we see that the model reasons more diversely. This smoothens the bimodal peaks and reduces the variance. However, the average accuracy of the model generations also degrades, causing the bias goes back up. We suspect bias-variance tradeoff is inherent in diversity-inducing decoding approaches. For example, min-p (Nguyen et al., 2024) combines temperature scaling with adaptive thresholding to not sample outlier tokens. However, this additional control is unable to reduce bias (Figure 16). Surprisingly, WiSE-FT uniquely manages to reduce both bias and variance.", + "bbox": [ + 109, + 606, + 887, + 790 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "7 Discussion", + "text_level": 1, + "bbox": [ + 112, + 805, + 267, + 823 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this work, we investigated the phenomenon of diversity collapse during the training of reasoning models. Our analysis reveals that standard SFT and RL pipelines can deteriorate in Pass@ $K$ due to the convergence of model generations toward a single reasoning trace. We demonstrated that WiSE-FT, which interpolates between early and late SFT checkpoints, significantly improves both Pass@1 and Pass@ $K$ across multiple math datasets and model scales. This is unlike alternative approaches such as temperature scaling or early", + "bbox": [ + 109, + 840, + 887, + 921 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 941, + 504, + 953 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "stopping, which face an inherent tradeoff. 
Furthermore, improving on these metrics corresponded with better adaptation to test-time scaling and RL. But other limitations of WiSE-FT may exist at larger scale, which we leave for future work.", + "bbox": [ + 109, + 95, + 887, + 142 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Overall, our work reveals the importance of maintaining diversity in reasoning models. Current decoding strategies (e.g., min-p, nucleus, and top-k) are still unable to fully extract a model's capabilities. We estimate that a significant gap, of tens of percent, remains compared to the optimal decoding strategy for Pass@K, i.e., top-K sampling over the model's marginal answer distribution $P(\\hat{y} \\mid x)$ (see Table 1 and Appendix G). We encourage future works to address downstream limitations more carefully in earlier stages of the training pipeline.", + "bbox": [ + 109, + 148, + 591, + 287 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/268a4214b18b790ae18f4c66932eb03706a3662c1ff2bbd2235424d0bcd92783.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodPass@2Pass@4
Nucleus0.570.67
Min-p0.570.67
Top-k0.560.67
Optimal0.760.83
", + "bbox": [ + 625, + 165, + 864, + 247 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 1: Best Pass@k of Gemma on GSM8k across SFT checkpoints", + "bbox": [ + 598, + 256, + 888, + 289 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "8 Acknowledgements", + "text_level": 1, + "bbox": [ + 112, + 301, + 364, + 324 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We'd like to thank Aviral Kumar, Sean Welleck, Amrith Setlur and Yiding Jiang for insightful discussions about test-time scaling and reinforcement learning. We'd also like to thank Alex Li, Sachin Goyal, and Jacob Springer for their meaningful contribution to our figures and literature review. We gratefully acknowledge support from Apple, Google, Cisco, OpenAI, NSF, Okawa foundation, the AI2050 program at Schmidt Sciences (Grant #G2264481), and Bosch Center for AI.", + "bbox": [ + 109, + 338, + 885, + 416 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 112, + 430, + 235, + 450 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "AlphaProof and AlphaGeometry teams. Ai achieves silver-medal standard solving international mathematical olympiad problems, jul 2024. URL https://deepmind.google/discover/blog/ai-solves-imo-problems-at-silver-medal-level/.", + "Edward Beeching, Lewis Tunstall, and Sasha Rush. Scaling test-time compute with open models, 2024. URL https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute.", + "Jeff Bilmes. Submodularity in machine learning and artificial intelligence. arXiv preprint arXiv:2202.00132, 2022.", + "Feng Chen, Allan Raventos, Nan Cheng, Surya Ganguli, and Shaul Druckmann. Rethinking fine-tuning when scaling test-time compute: Limiting confidence improves mathematical reasoning. arXiv preprint arXiv:2502.07154, 2025.", + "Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. 
Alphamath almost zero: Process supervision without process, 2024. URL https://arxiv.org/abs/2405.03553.", + "Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for best-of-n sampling in large language models. arXiv preprint arXiv:2412.15287, 2024a.", + "Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for best-of-n sampling in large language models, 2024b. URL https://arxiv.org/abs/2412.15287.", + "Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V. Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training, 2025. URL https://arxiv.org/abs/2501.17161.", + "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems, 2021. URL https://arxiv.org/abs/2110.14168." + ], + "bbox": [ + 112, + 467, + 885, + 919 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 431, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 941, + 508, + 954 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q Weinberger. On calibration of modern neural networks. In International conference on machine learning, pp. 1321-1330. PMLR, 2017.", + "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 
Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.", + "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset, 2021. URL https://arxiv.org/abs/2103.03874.", + "Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. The curious case of neural text degeneration, 2020. URL https://arxiv.org/abs/1904.09751.", + "Audrey Huang, Adam Block, Dylan J Foster, Dhruv Rohatgi, Cyril Zhang, Max Simchowitz, Jordan T Ash, and Akshay Krishnamurthy. Self-improvement in language models: The sharpening mechanism. arXiv preprint arXiv:2412.01951, 2024.", + "Yifei Li, Zeqi Lin, Shizhuo Zhang, Qiang Fu, Bei Chen, Jian-Guang Lou, and Weizhu Chen. Making large language models better reasoners with step-aware verifier, 2023. URL https://arxiv.org/abs/2206.02336.", + "Ziniu Li, Congliang Chen, Tian Xu, Zeyu Qin, Jiancong Xiao, Zhi-Quan Luo, and Ruoyu Sun. Preserving diversity in supervised fine-tuning of large language models. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=NQEe7B7bSw.", + "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. arXiv preprint arXiv:2305.20050, 2023.", + "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393.", + "Minh Nguyen, Andrew Baker, Clement Neo, Allen Roush, Andreas Kirsch, and Ravid Shwartz-Ziv. Turning up the heat: Min-p sampling for creative and coherent llm outputs, 2024. URL https://arxiv.org/abs/2407.01082.", + "Theo X. 
Olausson, Jeevana Priya Inala, Chenglong Wang, Jianfeng Gao, and Armando Solar-Lezama. Is self-repair a silver bullet for code generation?, 2024. URL https://arxiv.org/abs/2306.09896.", + "Pier Giuseppe Sessa, Robert Dadashi, Léonard Hussenot, Johan Ferret, Nino Vieillard, Alexandre Ramé, Bobak Shariari, Sarah Perrin, Abe Friesen, Geoffrey Cideron, Sertan Girgin, Piotr Stanczyk, Andrea Michi, Danila Sinopalnikov, Sabela Ramos, Amélie Héliou, Aliaksei Severyn, Matt Hoffman, Nikola Momchev, and Olivier Bachem. Bond: Aligning llms with best-of-n distillation, 2024. URL https://arxiv.org/abs/2407.14622.", + "Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for llm reasoning, 2024. URL https://arxiv.org/abs/2410.08146.", + "Louis Shao, Stephan Gouws, Denny Britz, Anna Goldie, Brian Strope, and Ray Kurzweil. Generating high-quality and informative conversation responses with sequence-to-sequence models. arXiv preprint arXiv:1701.03185, 2017.", + "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314.", + "Yuda Song, Hanlin Zhang, Carson Eisenach, Sham Kakade, Dean Foster, and Udaya Ghai. Mind the gap: Examining the self-improvement capabilities of large language models. arXiv preprint arXiv:2412.02674, 2024." + ], + "bbox": [ + 112, + 95, + 883, + 917 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 431, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 943, + 506, + 954 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Daniel Soudry, Elad Hoffer, Mor Shpigel Nacson, Suriya Gunasekar, and Nathan Srebro. 
The implicit bias of gradient descent on separable data. Journal of Machine Learning Research, 19(70):1-57, 2018.", + "Ashwin K Vijayakumar, Michael Cogswell, Ramprasath R. Selvaraju, Qing Sun, Stefan Lee, David Crandall, and Dhruv Batra. Diverse beam search: Decoding diverse solutions from neural sequence models, 2018. URL https://arxiv.org/abs/1610.02424.", + "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models, 2023. URL https://arxiv.org/abs/2203.11171.", + "Mitchell Wortsman, Gabriel Ilharco, Jong Wook Kim, Mike Li, Simon Kornblith, Rebecca Roelofs, Raphael Gontijo-Lopes, Hannaneh Hajishirzi, Ali Farhadi, Hongseok Namkoong, and Ludwig Schmidt. Robust fine-tuning of zero-shot models, 2022. URL https://arxiv.org/abs/2109.01903.", + "Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. arXiv preprint arXiv:2408.00724, 2024.", + "Wei Xiong, Hanning Zhang, Nan Jiang, and Tong Zhang. An implementation of generative prm. https://github.com/RLHFlow/RLHF-Reward-Modeling, 2024.", + "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. Demystifying long chain-of-thought reasoning in llms, 2025. URL https://arxiv.org/abs/2502.03373.", + "Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. arXiv preprint arXiv:2309.12284, 2023.", + "Dun Zhang, Jiacheng Li, Ziyang Zeng, and Fulong Wang. Jasper and stella: distillation of sota embedding models. arXiv preprint arXiv:2412.19048, 2024a.", + "Yiming Zhang, Avi Schwarzschild, Nicholas Carlini, Zico Kolter, and Daphne Ippolito. Forcing diffuse distributions out of language models, 2024b. 
URL https://arxiv.org/abs/2404.10859." + ], + "bbox": [ + 112, + 95, + 883, + 566 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 941, + 506, + 954 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A SFT in Binary Classification", + "text_level": 1, + "bbox": [ + 112, + 95, + 457, + 116 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Data and Model Setup We train a linear classifier $f(\\pmb{x}) = \\langle \\pmb{w}, \\pmb{x} \\rangle$ from random initialization over a binary Gaussian mixture distribution:", + "bbox": [ + 109, + 138, + 883, + 170 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nx \\mid y \\sim \\mathcal {N} (y \\boldsymbol {\\mu}, I ^ {d \\times d}) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 423, + 185, + 883, + 205 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\ny \\in \\{1, - 1 \\} \\text {u n i f o r m l y} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 411, + 208, + 883, + 224 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Given a model, we sample predictions, namely $\\hat{y} = 1$ with probability $\\sigma (\\langle \\pmb {w},\\pmb {x}\\rangle) = (1 + \\exp (-\\langle \\pmb {w},\\pmb {x}\\rangle))^{-1}$ , or $\\hat{y} = 0$ . Then, per-example Pass@1 is equal to $\\rho_{x} = \\sigma (y\\cdot \\langle \\pmb {w},\\pmb {x}\\rangle)$ . Similarly, the expected Pass@k is equal to $1 - (1 - \\rho_{x})^{k}$ .", + "bbox": [ + 109, + 241, + 885, + 291 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In our experiment, we train an overparametrized linear classifier over binary Gaussian data mixture $x \\mid y \\sim \\mathcal{N}(y \\cdot \\frac{1}{\\sqrt{d}} \\mathbf{1}, \\frac{1}{2} I)$ where $y = \\{-1, 1\\}$ and $d = 1000$ . 
We then evaluate $\\rho_x$ of 400 test samples. As training progresses, the distribution of $\\rho_x$ over the test data becomes bimodal due to the norm of $w$ monotonically increasing once it separates the training examples. Similarly, we observe that this leads to a drop in Pass@k while Pass@1 continues to improve.", + "bbox": [ + 109, + 296, + 679, + 409 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/110fab68254d7edd76626c12dee15bae4c5510f3d1620d88c62cdb6cd3e849b2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 692, + 289, + 877, + 404 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/136c97bd309561e7ca54a0ca2069ad3f0b521147a93b614b1fb712de45f0c740.jpg", + "image_caption": [ + "Figure 8: Pass@k across Training in Binary Classification" + ], + "image_footnote": [], + "bbox": [ + 130, + 454, + 272, + 561 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/235ace59d9f3a9fd4138d33eda84ec30aa842d50403a6325e09b14c22038f792.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 279, + 455, + 421, + 560 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/bd859f21719d1d07b48f5e32d3cf6033dc8039de333fc86dee2565d05cfa3961.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 429, + 455, + 571, + 560 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/3b4b375b012d324a664828356e9ada89ab2d796bd0c978efdd3c6ccc0233b487.jpg", + "image_caption": [ + "Figure 7: Weight Norm" + ], + "image_footnote": [], + "bbox": [ + 575, + 455, + 718, + 560 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/09a06b40fa4ea2f11c53a1fb0b61297195528e6be636a1b05e4aa3162e56bcea.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 728, + 455, + 870, + 560 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/672c0c412effe3f158ea40afc7de6980e89697915f6ff25df8aa79757cb6e4a2.jpg", + "image_caption": [ + 
"Figure 9: Histogram of $\rho_{x}$ across training steps" + ], + "image_footnote": [], + "bbox": [ + 124, + 642, + 272, + 748 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/9314a20b371980d52ecbb3503a73f93a81b6bb2a673d0623309bc41fbb73253e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 276, + 641, + 419, + 747 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/525268009e6075c1325653ae3ac1f4d2d550342360d9da06d7710798d251f1d6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 426, + 641, + 571, + 747 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/5a57a6efe0c941d086713a9c86373744f480001ca15dc55fec49bde54b75585e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 578, + 641, + 722, + 747 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/b1e638e906973e22c65e08bce79f7b716ac0fa8700587808cf2a8fb6a77abce6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 728, + 641, + 874, + 748 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B Expected Pass@k", + "text_level": 1, + "bbox": [ + 112, + 820, + 344, + 840 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Proposition B.1.", + "bbox": [ + 112, + 862, + 248, + 878 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\mathbb {E} _ {x, y \sim \mathcal {D}} \left[ \mathrm{Pass@K} (x) \right] \leq 1 - \left(\left(\mathbb {E} _ {x, y \sim \mathcal {D}} [ 1 - \rho_ {x} ]\right) ^ {2} + \mathrm{Var} (\rho_ {x})\right) ^ {k / 2}\n$$\n", + "text_format": "latex", + "bbox": [ + 264, + 893, + 732, + 916 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 434, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 941, + 508, + 954 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": 
"Proof.", + "bbox": [ + 112, + 95, + 161, + 112 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\begin{array}{l} \mathbb {E} \left[ (1 - \rho_ {x}) ^ {k} \right] \geq \mathbb {E} \left[ (1 - \rho_ {x}) ^ {2} \right] ^ {k / 2} (5) \\ = \left(1 - 2 \mathbb {E} \left[ \rho_ {x} \right] + \mathbb {E} \left[ \rho_ {x} ^ {2} \right]\right) ^ {k / 2} (6) \\ = \left(\left(1 - 2 \mathbb {E} [ \rho_ {x} ] + \mathbb {E} [ \rho_ {x} ] ^ {2}\right) + \left(\mathbb {E} \left[ \rho_ {x} ^ {2} \right] - \mathbb {E} [ \rho_ {x} ] ^ {2}\right)\right) ^ {k / 2} (7) \\ = \left(\left(1 - \mathbb {E} [ \rho_ {x} ]\right) ^ {2} + \operatorname{Var} (\rho_ {x})\right) ^ {k / 2} (8) \\ \end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 245, + 119, + 883, + 234 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/d6563642ee50cd803af3a0e79797353c9f1d469062b359c5cc621ad0702e4063.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 864, + 241, + 883, + 253 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 941, + 508, + 954 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "C RL Theory", + "text_level": 1, + "bbox": [ + 112, + 95, + 274, + 116 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.1 Overview", + "text_level": 1, + "bbox": [ + 112, + 132, + 240, + 148 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We will prove that in a discrete bandit setting with $K$ equally good arms that are the best arms, both REINFORCE and GRPO without KL regularization will eventually collapse into a single-arm strategy.", + "bbox": [ + 109, + 160, + 883, + 193 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We will further prove that, with KL regularization with 
respect to the initial policy, the converged policy of REINFORCE has the same action distribution as the initial policy when constrained on the set of best arms. Therefore, diversity within good actions will not increase through REINFORCE training.", + "bbox": [ + 109, + 198, + 883, + 246 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.2 Notations and Setup", + "text_level": 1, + "bbox": [ + 112, + 258, + 334, + 277 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Formally we consider the following setting. We consider a $K + 1$ -armed bandit, with arms $\{1,2,\dots ,K + 1\}$ . Arms $1,\ldots ,K$ are \"good,\" each yielding reward 1, and the other arm is \"bad,\" yielding reward 0. We use a softmax parameterization:", + "bbox": [ + 109, + 287, + 885, + 333 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\np _ {i} = \frac {e ^ {\theta_ {i}}}{\sum_ {j = 1} ^ {K + 1} e ^ {\theta_ {j}}}, \quad i = 1, \dots , K + 1.\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 333, + 640, + 375 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "to denote the action distribution. We will use $\theta_i^{(t)}$ to denote the parameter at step $t$ .", + "bbox": [ + 111, + 378, + 723, + 398 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "It is standard to consider using the KL divergence between the current policy and a reference policy (which we set as $p_0$ here) as a regularization term.", + "bbox": [ + 109, + 402, + 883, + 434 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\mathrm{KL} (p ^ {(t)} | p ^ {(0)}) = \sum_ {i = 1} ^ {K + 1} p _ {i} ^ {(t)} \log \frac {p _ {i} ^ {(t)}}{p _ {i} ^ {(0)}}\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 436, + 620, + 483 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For REINFORCE, we will consider the following training setup. 
At step $t$ :", + "bbox": [ + 111, + 492, + 651, + 508 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. We sample an arm $I_{t}$ according to $p(\cdot) = (p_1^{(t)},\dots ,p_{K + 1}^{(t)})$ and receive reward $r_t$.", + "2. We update using policy gradient." + ], + "bbox": [ + 150, + 518, + 759, + 558 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\theta_ {i} ^ {(t + 1)} = \theta_ {i} ^ {(t)} + \eta r _ {t} \nabla_ {\theta_ {i}} (\log p _ {I _ {t}} ^ {(t)}) - \eta \beta \nabla_ {\theta_ {i}} \mathrm{KL} (p ^ {(t)} | p ^ {(0)}), i = 1, \dots , K + 1,\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 561, + 823, + 585 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $\eta > 0$ is the step size and $\beta$ is the hyperparameter controlling the strength of KL regularization.", + "bbox": [ + 169, + 585, + 883, + 617 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For GRPO, we will consider the following simplified training setup. This is equivalent to the empirical version of GRPO with online sampling.", + "bbox": [ + 109, + 628, + 883, + 660 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Sample $G$ arms $\{I_t^{(1)},\dots ,I_t^{(G)}\}$ i.i.d. from the current policy $p(\cdot)$ and receive rewards $r_t^{(g)}$ .", + "2. 
Compute" + ], + "bbox": [ + 150, + 669, + 836, + 709 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\mu_ {t} = \\frac {1}{G} \\sum_ {g = 1} ^ {G} r _ {t} ^ {(g)}, \\quad \\sigma_ {t} = \\sqrt {\\frac {1}{G} \\sum_ {g = 1} ^ {G} \\left(r _ {t} ^ {(g)} - \\mu_ {t}\\right) ^ {2}},\n$$\n", + "text_format": "latex", + "bbox": [ + 344, + 710, + 707, + 756 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "and define the normalized advantage", + "bbox": [ + 169, + 757, + 444, + 773 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{r} \\tilde {r} _ {t} ^ {(g)} = \\left\\{ \\begin{array}{l l} \\frac {r _ {t} ^ {(g)} - \\mu_ {t}}{\\sigma_ {t}}, & \\sigma_ {t} \\neq 0, \\\\ 0, & \\sigma_ {t} = 0. \\end{array} \\right. \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 411, + 777, + 640, + 834 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We will skip the update if $\\sigma_t = 0$ .", + "bbox": [ + 169, + 835, + 419, + 853 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "3. Update each parameter $\\theta_{i}$ via", + "bbox": [ + 150, + 856, + 390, + 872 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\theta_ {i} \\gets \\theta_ {i} + \\frac {\\eta}{G} \\sum_ {g = 1} ^ {G} \\tilde {r} _ {t} ^ {(g)} \\nabla_ {\\theta_ {i}} (\\log p _ {I _ {t} ^ {(g)}} ^ {(t)}) - \\eta \\beta \\nabla_ {\\theta_ {i}} \\mathrm {K L} (p ^ {(t)} | p ^ {(0)}), 
i = 1, \\ldots , K + 1,\n$$\n", + "text_format": "latex", + "bbox": [ + 220, + 877, + 831, + 921 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 429, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 941, + 506, + 954 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.3 Implicit Diversity Collapse without KL regularization", + "text_level": 1, + "bbox": [ + 111, + 95, + 632, + 114 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Theorem C.1 (Collapse to Deterministic Policy). Under REINFORCE or GRPO updates without KL regularization $(\\beta = 0)$ , given a sufficiently small $\\eta$ , with probability 1:", + "bbox": [ + 111, + 125, + 885, + 157 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\limsup_{t\\to \\infty}\\max_{i\\in [K]}p_{i}^{(t)} = 1.\n$$\n", + "text_format": "latex", + "bbox": [ + 413, + 164, + 580, + 196 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Thus, the policy collapses to a single-arm strategy during training.", + "bbox": [ + 112, + 200, + 584, + 219 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Proof. The proof is two-fold.", + "bbox": [ + 112, + 233, + 325, + 251 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Using Lemma C.3 and C.4, we can show that bad arm probability diminishes,", + "bbox": [ + 111, + 256, + 676, + 273 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\lim _ {t \\to \\infty} p _ {K + 1} ^ {(t)} = 0\n$$\n", + "text_format": "latex", + "bbox": [ + 439, + 280, + 557, + 308 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We will then define a property named the Self-enforcing Stochastic Policy Update Rule.", + "bbox": [ + 111, + 321, + 580, + 339 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Definition C.2 (Self-enforcing Stochastic Policy Update Rule). 
We define three properties of a policy update rule that will lead to diversity collapse:", + "bbox": [ + 111, + 344, + 885, + 378 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. The policy update takes the form of $\\sum_{k=1}^{B} A_k \\nabla \\log p_i(\\theta_{i_k})$ where $i_k$ is the $k$ -th sampled arm in the batch and $A_k$ is a function determined by (i) the sum of rewards $\\sum_{k=1}^{B} r_{i_k}$ within the batch; (ii) the reward $r_{i_k}$ and (iii) the batch size $B$ .", + "2. A policy update rule is said to be self-enforcing, if $\\mathbb{E}[\\theta_i^{(t + 1)} - \\theta_i^{(t)}]$ is monotonous with $\\theta_{i}^{(t)}$ for all $i\\in [K]$ and $t$ . Further $\\mathbb{E}[\\theta_i^{(t + 1)} - \\theta_i^{(t)}]$ is non-positive if $i\\geq K + 1$ and is non-negative if $i\\leq K$ .", + "3. A policy update rule is said to be self-enforcing stochastic if it is self-enforcing and there exist constants $C_1, C_2 > 0$ such that for any $\\epsilon > 0$, whenever the current policy satisfies $\\max_{i \\in [K]} p_i^{(t)} \\in [1/2K, 1 - \\epsilon]$ (i.e., no single good arm dominates), for $i^* = \\arg \\max_{i \\in [K]} p_i^{(t)}$ the conditional second moment of the parameter updates for every arm $i \\in [K + 1]$ and $i \\neq i^*$ satisfies:" + ], + "bbox": [ + 151, + 392, + 885, + 575 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} \\left[ \\left(\\left(\\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)}\\right) - \\left(\\theta_ {i ^ {*}} ^ {(t + 1)} - \\theta_ {i ^ {*}} ^ {(t)}\\right)\\right) ^ {2} \\mid \\theta^ {(t)} \\right] \\geq C _ {1} \\epsilon^ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 320, + 582, + 732, + 617 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 171, + 623, + 205, + 637 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n| \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} | < C _ {2}\n$$\n", + 
"text_format": "latex", + "bbox": [ + 452, + 633, + 602, + 656 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Lemma C.5 shows that for any self-enforcing stochastic policy update rule, the final policy collapses into a single-arm policy.", + "bbox": [ + 111, + 669, + 882, + 700 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Using Lemma C.6 and C.7, we can show that REINFORCE and GRPO are self-enforcing stochastic policy update rules when bad arm probability is lower than $1 / 2$ . The proof is then complete.", + "bbox": [ + 111, + 705, + 885, + 739 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Lemma C.3 (Bad Arm Probability Diminishes Using REINFORCE). Under the REINFORCE algorithm without KL regularization $(\\beta = 0)$ , $\\lim_{t\\to \\infty}p_{K + 1}^{(t)} = 0$ almost surely.", + "bbox": [ + 111, + 746, + 885, + 785 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Proof. We can first simplify the REINFORCE update rule to", + "bbox": [ + 112, + 797, + 547, + 814 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\theta_ {i} ^ {(t + 1)} = \\theta_ {i} ^ {(t)} + \\eta r _ {t} (\\mathbf {1} (I _ {t} = i) - p _ {i} ^ {(t)}), \\quad i = 1, \\dots , K + 1.\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 821, + 707, + 844 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Noted that $\\sum_{i}\\theta_{i}^{(t)}$ will not change with $t$ , WLOG, assume", + "bbox": [ + 111, + 859, + 542, + 880 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i} \\theta_ {i} ^ {(t)} = 0.\n$$\n", + "text_format": "latex", + "bbox": [ + 450, + 887, + 545, + 921 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 429, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 943, + 506, + 954 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": 
"Because $r_{K + 1} = 0$ , we can then assume without loss of generality, for all $t$ , $I_t \\leq K$ .", + "bbox": [ + 111, + 95, + 725, + 113 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "This then suggests that", + "bbox": [ + 112, + 119, + 287, + 135 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\theta_ {K + 1} ^ {(t + 1)} = \\theta_ {K + 1} ^ {(t)} - \\eta p _ {K + 1} ^ {(t)}\n$$\n", + "text_format": "latex", + "bbox": [ + 405, + 136, + 589, + 160 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "monotonically decrease.", + "bbox": [ + 111, + 165, + 294, + 181 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "For any $\\epsilon$ , if $p_{K + 1}^{(t)} > \\epsilon$ holds for infinite $t$ , then there exists $t_0$ , where $\\theta_{K + 1}^t < \\log \\epsilon$ for any $t > t_0$ . For any $t > t_0$ , there exists $i \\in [K]$ , such that $\\theta_i^{(t)} > 0$ . This then suggests that", + "bbox": [ + 111, + 189, + 885, + 231 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\np _ {K + 1} ^ {(t)} \\leq \\exp (\\theta_ {K + 1} ^ {(t)} - \\theta_ {i} ^ {(t)}) \\leq \\epsilon .\n$$\n", + "text_format": "latex", + "bbox": [ + 379, + 238, + 614, + 262 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "This leads to a contradiction. The proof is then complete.", + "bbox": [ + 111, + 268, + 534, + 286 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Lemma C.4 (Bad Arm Probability Diminishes Using GRPO). Under the GRPO algorithm without KL regularization $(\\beta = 0), \\lim_{t \\to \\infty} p_{K+1}^{(t)} = 0$ almost surely.", + "bbox": [ + 111, + 300, + 883, + 338 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Proof. For GRPO, we can show that $\\tilde{r}_t^{(g)}$ is negative iff $I_t^{(g)} = K + 1$ . 
Therefore, we can show that $\\theta_{K+1}^{(t)}$ monotonically decreases, similar to the case in REINFORCE.", + "bbox": [ + 111, + 359, + 882, + 396 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "If $p_{K+1}^{(t)} > \\epsilon$ holds for some $t$, one can prove that $\\theta_{K+1}^{(t)}$ will decrease by a constant depending on $\\epsilon$ in expectation. Therefore, following the same line as in C.3, we can prove that $\\lim_{t \\to \\infty} p_{K+1}^{(t)} = 0$ almost surely.", + "bbox": [ + 111, + 402, + 883, + 460 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Lemma C.5 (Collapse Happens for All Self-enforcing Stochastic Policy Update Rule). Consider a policy update process that is self-enforcing stochastic (Definition C.2), then $\\lim \\sup_{t\\to \\infty}\\max_{i\\in [K]}p_i^{(t)} = 1$ almost surely.", + "bbox": [ + 111, + 472, + 885, + 523 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Proof. We will inductively prove, for each $K$, the following induction hypothesis: for any $\\epsilon, \\delta > 0$, there exists $T_{\\epsilon, \\delta, K} > 0$ such that", + "bbox": [ + 111, + 545, + 885, + 578 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\Pr \\left(\\max _ {t < T _ {\\epsilon , \\delta , K}} \\max _ {i \\in [ K ]} p _ {i} ^ {(t)} < 1 - \\epsilon\\right) < \\delta .\n$$\n", + "text_format": "latex", + "bbox": [ + 369, + 604, + 625, + 635 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We first consider the case where $K = 2$ .", + "bbox": [ + 111, + 651, + 410, + 667 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Consider the stopping time,", + "bbox": [ + 111, + 674, + 320, + 690 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\tau_ {\\epsilon} = \\min \\left\\{ t : \\max _ {i \\in [ K ]} p _ {i} ^ {(t)} > 1 - \\epsilon \\right\\}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 700, + 612, + 731 + ], + "page_idx": 16 + }, + { + "type": "text", +
"text": "For any $\\mathcal{I} = \\{1,2\\}$ , define $\\Delta_{\\mathcal{I}}^{t} = \\max_{j\\in [K]}\\theta_{j}^{t} - \\min_{j\\in \\mathcal{I}}\\theta_{i}^{t}$ .", + "bbox": [ + 111, + 747, + 544, + 767 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Assume $\\theta_{i*}^t = \\max_{j\\in [K]}\\theta_j^t$ , because $|\\mathcal{I}|\\geq 2$ , there exists $i\\neq i^{*}$ , $\\min_{j\\in \\mathcal{I}}\\theta_i^t >0$ . We will show three properties of $\\Delta_I^t$", + "bbox": [ + 111, + 773, + 883, + 810 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "First $\\Delta_{\\mathcal{I}}^{(t)}$ is a submartingale defined on the filtration of the distribution of $\\theta^{(t)}$ because", + "bbox": [ + 111, + 816, + 748, + 837 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\Delta_ {\\mathcal {I}} ^ {(t)} | \\theta_ {t} ] - \\Delta_ {\\mathcal {I}} ^ {(t - 1)} > \\mathbb {E} [ (\\theta_ {i ^ {*}} ^ {t + 1} - \\theta_ {i ^ {*}} ^ {t}) - (\\theta_ {i} ^ {t + 1} - \\theta_ {i} ^ {t}) | \\theta_ {t} ] > 0.\n$$\n", + "text_format": "latex", + "bbox": [ + 264, + 864, + 730, + 887 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "as the policy is self-enforcing.", + "bbox": [ + 111, + 902, + 334, + 919 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 429, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 941, + 506, + 954 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Further $\\Delta_{\\mathcal{I}}^{(t)}$ has bounded growth of $2C_2$ as", + "bbox": [ + 111, + 94, + 434, + 116 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} | \\max _ {j \\in [ K ]} \\theta_ {j} ^ {t + 1} - \\max _ {j \\in [ K ]} \\theta_ {j} ^ {t} | < C _ {2}. 
\\\\ \\bigl|\\min_{j\\in \\mathcal{I}}\\theta_{j}^{t + 1} - \\min_{j\\in \\mathcal{I}}\\theta_{j}^{t}\\bigr| < C_{2}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 393, + 126, + 602, + 181 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Furthermore, the second moment of $\\Delta_{\\mathcal{I}}^{(t)}$ needs to increase with $t$ by a constant for any $t < \\tau_{\\epsilon}$ .", + "bbox": [ + 111, + 222, + 830, + 242 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {E} \\left[ \\left(\\Delta_ {\\mathcal {I}} ^ {(t + 1)}\\right) ^ {2} \\mid \\theta_ {t} \\right] \\geq \\left(\\Delta_ {\\mathcal {I}} ^ {(t)}\\right) ^ {2} + \\mathbb {E} \\left[ \\left(\\Delta_ {\\mathcal {I}} ^ {(t + 1)} - \\Delta_ {\\mathcal {I}} ^ {(t)}\\right) ^ {2} \\mid \\theta_ {t} \\right] \\\\ \\geq \\left(\\Delta_ {\\mathcal {I}} ^ {(t)}\\right) ^ {2} + C _ {1} \\epsilon^ {2}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 297, + 252, + 697, + 296 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "When $t < \\tau_{\\epsilon}$ , it holds that $\\Delta_{\\mathcal{I}}^{(t)} < \\log \\frac{2}{\\epsilon}$ , otherwise we can prove that", + "bbox": [ + 111, + 316, + 617, + 337 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\max _ {i, j \\in \\{1, 2 \\}} p _ {i} / p _ {j} = \\exp (\\Delta_ {\\mathcal {I}} ^ {(t)}) > \\frac {2 - 2 \\epsilon}{\\epsilon}. \\Rightarrow \\max _ {i \\in \\{1, 2 \\}} p _ {i} > 1 - \\epsilon .\n$$\n", + "text_format": "latex", + "bbox": [ + 269, + 345, + 725, + 378 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "This is a contradiction. 
Further, by Martingale inequality, we have that", + "bbox": [ + 112, + 388, + 632, + 405 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\left(\\Delta^ {\\min \\{t, \\tau_ {\\epsilon} \\}}\\right) ^ {2} ] > \\mathbb {E} [ \\left(\\Delta^ {0}\\right) ^ {2} ] + C _ {1} \\epsilon^ {2} \\mathbb {E} [ \\min \\{t, \\tau_ {\\epsilon} \\} ]\n$$\n", + "text_format": "latex", + "bbox": [ + 310, + 414, + 686, + 441 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Further, as $\\Delta^t$ has bounded growth, we have that", + "bbox": [ + 112, + 459, + 477, + 476 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} \\left[ \\left(\\Delta^ {\\min \\{t, \\tau_ {\\epsilon} \\}}\\right) ^ {2} \\right] < (\\log \\frac {2}{\\epsilon} + 2 C _ {2}) ^ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 366, + 484, + 630, + 516 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "This implies $\\mathbb{E}[\\min \\{t,\\tau_{\\epsilon}\\}] < \\frac{(\\log\\frac{2}{\\epsilon} + 2C_2)^2}{C_1\\epsilon^2}$ for all $t$ , this implies", + "bbox": [ + 111, + 532, + 570, + 559 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\tau_ {\\epsilon} ] < \\frac {(\\log \\frac {2}{\\epsilon} + 2 C _ {2}) ^ {2}}{C _ {1} \\epsilon^ {2}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 408, + 568, + 588, + 604 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Further, by Markov inequality, if we choose", + "bbox": [ + 112, + 619, + 434, + 637 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nT _ {\\epsilon , \\delta , 2} = \\frac {(\\log \\frac {2}{\\epsilon} + 2 C _ {2}) ^ {2}}{C _ {1} \\epsilon^ {2} \\delta}.\n$$\n", + "text_format": "latex", + "bbox": [ + 408, + 646, + 589, + 681 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "then,", + "bbox": [ + 112, + 691, + 153, + 705 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": 
"\n$$\n\\Pr \\left(\\tau_ {\\epsilon} > T _ {\\epsilon , \\delta , 2}\\right) < \\frac {\\mathbb {E} \\left[ \\tau_ {\\epsilon} \\right]}{T _ {\\epsilon , \\delta , 2}} < \\delta .\n$$\n", + "text_format": "latex", + "bbox": [ + 392, + 714, + 604, + 750 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "This concludes the proof for $K = 2$ .", + "bbox": [ + 112, + 766, + 380, + 782 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Now assuming the result holds for $K - 1$ and consider the case for $K$ , First, we choose a small enough constant $C_{\\delta ,\\epsilon ,K,N} > 0$ , such that when $p_{K - 1}^{(0)} < C_{\\delta ,\\epsilon ,K,N}$ , the following two random processes are close:", + "bbox": [ + 111, + 789, + 883, + 825 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Running the algorithm for $N$ steps on the $K$ arms bandit yields $\\theta_i^{(t)}, i \\in [K]$", + "- Running the algorithm for $N$ steps on a $K - 1$ arms bandit yields $\\tilde{\\theta}_i^{(t)}, i \\in [K - 1]$ with $\\tilde{\\theta}_i^{(0)} = \\theta_i^{(0)}, i < K - 1$ and $\\tilde{\\theta}_{K - 1}^{(0)} = \\theta_K(0)$" + ], + "bbox": [ + 155, + 848, + 880, + 919 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 429, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 941, + 506, + 953 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "and there exists a joint measure on $\\theta$ and $\\tilde{\\theta}$ such that", + "bbox": [ + 111, + 95, + 503, + 112 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\forall i \\in [ K - 2 ], t < N, \\Pr (| p _ {i} ^ {t} - \\tilde {p} _ {i} ^ {t} | > \\epsilon / 2) < \\delta / 6.\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 118, + 676, + 138 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": 
"\n$$\n\\operatorname * {P r} (| p _ {K} ^ {t} - \\tilde {p} _ {K - 1} ^ {t} | > \\epsilon / 2) < \\delta / 6.\n$$\n", + "text_format": "latex", + "bbox": [ + 444, + 140, + 676, + 159 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\Pr \\left(\\left| p _ {K} ^ {t} - p _ {K} ^ {0} \\right| > \\epsilon / 2\\right) < \\delta / 6.\n$$\n", + "text_format": "latex", + "bbox": [ + 460, + 162, + 676, + 181 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This joint measure is constructed by choosing the corresponding arm for two process at each sampling step as long as the sampled arm is not $K$ and uses the uniform convergence on $\\nabla \\log_{\\theta} p_i$ . Now following the same argument at $K = 2$ , we can show that there exists $\\tilde{T}_{\\epsilon, \\delta, K}$ such that", + "bbox": [ + 111, + 195, + 885, + 244 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname * {P r} (\\exists t < \\tilde {T} _ {\\epsilon , \\delta , K}, \\min _ {t \\in [ K ]} p _ {t} < C _ {\\delta , \\epsilon , K, T _ {\\epsilon / 2, \\delta / 2, K - 1}}) > 1 - \\delta / 2.\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 251, + 707, + 280 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Then we can invoke the induction hypothesis and uses the coupling shown above to show that if we choose $T_{\\epsilon, \\delta, K} = \\tilde{T}_{\\epsilon, \\delta, K} + T_{\\epsilon/2, \\delta/2, K-1}$ , then there exists a time step that one arm has probability higher than $1 - \\epsilon$ with probability at least $1 - \\delta$ .", + "bbox": [ + 111, + 294, + 885, + 342 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/ca1b539d95c5fb460f2be006fa7134f3f1ba977fdaf654bf1420e28cac93d5b2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 864, + 348, + 883, + 362 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Lemma C.6. 
The REINFORCE algorithm without KL regularization ( $\\beta = 0$ ) is self-enforcing stochastic (Definition C.2) once $p_{K+1}^{(t)} < 1/2$ .", + "bbox": [ + 111, + 372, + 885, + 410 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Proof. The REINFORCE algorithm is self-enforcing because", + "bbox": [ + 112, + 422, + 552, + 439 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} ] = \\eta p _ {i} (r _ {i} - \\sum_ {j \\in [ K + 1 ]} p _ {j} r _ {j}).\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 446, + 651, + 484 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Further,", + "bbox": [ + 112, + 500, + 176, + 513 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n| \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} | \\leq 1\n$$\n", + "text_format": "latex", + "bbox": [ + 428, + 520, + 568, + 542 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "and if we consider the distribution of $\\Delta_{i,i^*,t} = \\frac{\\left(\\theta_i^{(t + 1)} - \\theta_i^{(t)}\\right) - \\left(\\theta_{i^*}^{(t + 1)} - \\theta_{i^*}^{(t)}\\right)}{\\eta}$ , it holds that", + "bbox": [ + 111, + 549, + 751, + 587 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\Delta_ {i, i ^ {*}, t} = r _ {I _ {t}} \\left(\\mathbf {1} (i = I _ {t}) - \\mathbf {1} (i ^ {*} = I _ {t}) - p _ {i} + p _ {i ^ {*}}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 320, + 593, + 674, + 612 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\Pr \\left(\\Delta_ {i, i ^ {*}, t} = - 1 - p _ {i} + p _ {i} ^ {*}\\right) \\geq \\Pr \\left(I _ {t} = i ^ {*}\\right) = p _ {i ^ {*}}\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 648, + 679, + 667 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Therefore", + "bbox": [ + 112, + 681, + 191, + 695 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": 
"\n$$\n\\begin{array}{l} \\mathbb {E} \\left[ \\Delta_ {i, i ^ {*}, t} ^ {2} \\right] \\geq p _ {i ^ {*}} \\left(- 1 - p _ {i} + p _ {i} ^ {*}\\right) ^ {2} \\\\ \\geq p _ {i ^ {*}} (1 - p _ {i ^ {*}}) ^ {2} \\geq \\frac {\\epsilon^ {2}}{2 K}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 702, + 620, + 760 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This then concludes the proof with $C_1 = \\eta / 2K$ and $C_2 = \\eta$ .", + "bbox": [ + 111, + 766, + 558, + 782 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Lemma C.7. The GRPO algorithm without KL regularization ( $\\beta = 0$ ) is self-enforcing stochastic (Definition C.2) once $p_{K+1}^{(t)} < 1/2$ .", + "bbox": [ + 111, + 789, + 885, + 828 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Proof. The GRPO algorithm is self-enforcing because", + "bbox": [ + 112, + 840, + 504, + 857 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} ] = \\eta \\mathbb {E} [ \\tilde {r} _ {t} ^ {(g)} (\\mathbf {1} (I _ {t} ^ {(g)} = i) - p _ {i} ^ {(t)}) ] = \\eta \\mathbb {E} [ \\tilde {r} _ {t} ^ {(g)} \\mathbf {1} (I _ {t} ^ {(g)} = i) ] = \\eta \\mathbb {E} _ {\\mu_ {t}} [ \\mathbb {E} [ \\tilde {r} _ {t} ^ {(g)} \\mathbf {1} (I _ {t} ^ {(g)} = i) | \\mu_ {t} ] ].\n$$\n", + "text_format": "latex", + "bbox": [ + 125, + 864, + 867, + 890 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Noted that $\\mathbb{E}[\\tilde{r}_t^{(g)}\\mathbf{1}(I_t^{(g)} = i)|\\mu_t]$ is monotonous with $p_i$ , hence monotonous with $\\theta_{i}$ .", + "bbox": [ + 111, + 900, + 728, + 919 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 429, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 941, + 506, + 954 + ], + "page_idx": 18 + }, + { + "type": "text", + 
"text": "Further", + "bbox": [ + 112, + 95, + 173, + 109 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} | \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} | \\leq \\eta \\max _ {g} | \\tilde {r} _ {t} ^ {(g)} (\\mathbf {1} (I _ {t} ^ {(g)} = i) - p _ {i} ^ {(t)}) | \\\\ \\leq \\eta \\max _ {g} | \\tilde {r} _ {t} ^ {(g)} | \\leq \\eta \\sqrt {G}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 113, + 681, + 171 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Now we only need to lower bound the second momentum of", + "bbox": [ + 112, + 183, + 563, + 198 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\Delta_ {i, i ^ {*}, t} = \\frac {\\left(\\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)}\\right) - \\left(\\theta_ {i ^ {*}} ^ {(t + 1)} - \\theta_ {i ^ {*}} ^ {(t)}\\right)}{\\eta}\n$$\n", + "text_format": "latex", + "bbox": [ + 338, + 202, + 656, + 247 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Noted that", + "bbox": [ + 112, + 265, + 196, + 279 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} = \\frac {\\eta}{G} \\sum_ {g = 1} ^ {G} \\tilde {r} _ {t} ^ {(g)} \\mathbf {1} (I _ {t} ^ {(g)} = i).\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 282, + 637, + 327 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "It holds that", + "bbox": [ + 112, + 339, + 205, + 353 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma_ {t} = \\sqrt {\\frac {1}{G} \\sum_ {g} (r _ {t} ^ {g} - \\mu) ^ {2}} = \\sqrt {\\frac {1}{G} \\sum_ {g} r _ {t} ^ {g} - 2 \\mu r _ {t} ^ {g} + \\mu^ {2}} = \\sqrt {\\mu - \\mu^ {2}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 259, + 356, + 736, + 405 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Therefore when $r_t^{(g)} > 0$", + "bbox": [ + 112, + 417, + 303, + 436 + ], + "page_idx": 19 + }, + { + "type": 
"equation", + "text": "\n$$\n\\tilde {r} _ {t} ^ {(g)} = \\frac {r _ {t} ^ {(g)} - \\mu_ {t}}{\\sigma_ {t}} = \\frac {1 - \\mu_ {t}}{\\sigma_ {t}} = \\sqrt {\\frac {1 - \\mu_ {t}}{\\mu_ {t}}} \\geq \\sqrt {\\frac {1}{G - 1}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 441, + 691, + 484 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Because all $\\tilde{r}_t^{(g)}$ are the same when $r_t^{(g)} > 0$ , it holds that when $i \\in [K]$", + "bbox": [ + 112, + 497, + 640, + 517 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\Delta_ {i, i ^ {*}, t} ^ {2} = \\frac {1}{G} \\frac {1 - \\mu_ {t}}{\\mu_ {t}} \\left(\\sum_ {g = 1} ^ {G} {\\bf 1} (I _ {t} ^ {(g)} = i) - {\\bf 1} (I _ {t} ^ {(g)} = i ^ {*})\\right) ^ {2} \\\\ \\geq \\frac {1}{G (G - 1)} \\left(\\sum_ {g = 1} ^ {G} \\mathbf {1} \\left(I _ {t} ^ {(g)} = i\\right) - \\mathbf {1} \\left(I _ {t} ^ {(g)} = i ^ {*}\\right)\\right) ^ {2}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 292, + 522, + 704, + 625 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "This then implies", + "bbox": [ + 112, + 636, + 243, + 652 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\Delta_ {i, i ^ {*}, t} ^ {2} ] \\geq \\frac {1}{G (G - 1)} \\mathbb {E} \\left[ \\left(\\sum_ {g = 1} ^ {G} {\\bf 1} (I _ {t} ^ {(g)} = i) - {\\bf 1} (I _ {t} ^ {(g)} = i ^ {*})\\right) ^ {2} \\Big | \\mu_ {t} \\neq 1, 0 \\right]\n$$\n", + "text_format": "latex", + "bbox": [ + 227, + 656, + 764, + 713 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "One can without loss of generality assume $I_{t}^{(G)} = K + 1$ and show that", + "bbox": [ + 112, + 724, + 643, + 744 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {E} \\left[ \\Delta_ {i, i ^ {*}, t} ^ {2} \\right] \\geq \\frac {1}{G (G - 1)} \\mathbb {E} \\left[ \\left(\\sum_ {g = 1} ^ {G - 1} \\mathbf {1} \\left(I _ {t} 
^ {(g)} = i\\right) - \\mathbf {1} \\left(I _ {t} ^ {(g)} = i ^ {*}\\right)\\right) ^ {2} \\right] \\\\ \\geq \\frac {1}{G} \\mathbb {E} \\left[ \\left(\\mathbf {1} \\left(I _ {t} ^ {(1)} = i\\right) - \\mathbf {1} \\left(I _ {t} ^ {(1)} = i ^ {*}\\right)\\right) ^ {2} \\right] = \\frac {p _ {i} + p _ {i} ^ {*}}{G} \\geq \\frac {1}{2 K G}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 241, + 750, + 754, + 840 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "When $i \\neq K$ , noted that $\\left(\\theta_{i}^{(t+1)} - \\theta_{i}^{(t)}\\right) - \\left(\\theta_{i^{*}}^{(t+1)} - \\theta_{i^{*}}^{(t)}\\right) > \\left(\\theta_{i}^{(t+1)} - \\theta_{i}^{(t)}\\right) > 0$ . Therefore, a similar bound can show that $\\mathbb{E}[\\Delta_{i,i^{*},t}^{2}] > \\frac{1}{2KG}$ . This then concludes the proof with $C_{1} = \\eta / 2KG$ and $C_{2} = \\sqrt{G}$ .", + "bbox": [ + 111, + 852, + 883, + 897 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "□", + "bbox": [ + 864, + 902, + 882, + 915 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 429, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 941, + 506, + 953 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "C.4 Diversity Never Improves with KL regularization", + "text_level": 1, + "bbox": [ + 111, + 95, + 589, + 113 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Theorem C.8 (Diversity Preservation under KL Regularization). With $p_0$ as the initial policy and KL regularization hyperparameter $\\beta > 0$ , if the REINFORCE process converges to policy $p^*$ . 
Then, $p^*$ satisfies:", + "bbox": [ + 111, + 125, + 885, + 157 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {p ^ {*} (i)}{\\sum_ {j = 1} ^ {K} p ^ {*} (j)} = \\frac {p _ {0} (i)}{\\sum_ {j = 1} ^ {K} p _ {0} (j)} \\quad \\forall i \\in \\{1, \\dots , K \\}.\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 162, + 671, + 203 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Consequently, the distribution over the optimal arms under $p^*$ matches the initial distribution $p_0$ restricted to these arms and renormalized.", + "bbox": [ + 111, + 209, + 885, + 239 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Proof. Using policy gradient theorem, we know that the converged policy $p^*$ and corresponding parameter $\\theta^*$ satisfy that,", + "bbox": [ + 111, + 256, + 883, + 287 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {\\theta} \\left[ \\sum_ {i = 1} ^ {K + 1} r _ {i} p _ {i} + \\beta \\mathrm {K L} \\left(p | p ^ {0}\\right) \\right] \\Bigg | _ {\\theta = \\theta^ {*}} = 0\n$$\n", + "text_format": "latex", + "bbox": [ + 348, + 294, + 648, + 340 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "This then suggests that for any $k$", + "bbox": [ + 111, + 353, + 357, + 369 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\nr _ {k} p _ {k} ^ {*} - p _ {k} ^ {*} \\sum_ {i = 1} ^ {K + 1} r _ {i} ^ {*} p _ {i} ^ {*} + \\beta \\sum_ {i = 1} ^ {K + 1} \\nabla_ {\\theta_ {k}} [ p _ {i} \\log p _ {i} - p _ {i} \\log p _ {i} ^ {0} ] = 0\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 377, + 714, + 421 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "This is equivalent to", + "bbox": [ + 111, + 435, + 266, + 450 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\nr _ {k} p _ {k} ^ {*} - p _ {k} ^ {*} \\sum_ {i = 1} ^ {K + 1} r _ {i} ^ {*} p _ {i} ^ {*} + \\beta \\sum_ {i = 1} ^ {K + 1} (\\mathbf {1} (i = k) - p _ {k} ^ 
{*}) p _ {i} ^ {*} (\\log p _ {i} ^ {*} + 1 - \\log p _ {i} ^ {0}) = 0\n$$\n", + "text_format": "latex", + "bbox": [ + 230, + 458, + 764, + 501 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Simplifying", + "bbox": [ + 111, + 516, + 205, + 532 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\nr _ {k} + \\beta (\\log p _ {k} ^ {*} + 1 - \\log p _ {0}) = \\sum_ {i = 1} ^ {K + 1} r _ {i} ^ {*} p _ {i} ^ {*} + \\beta \\sum_ {i = 1} ^ {K + 1} p _ {i} ^ {*} (\\log p _ {i} ^ {*} + 1 - \\log p _ {i} ^ {0})\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 558, + 766, + 602 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "For all $k \\in [K]$ , we know that $r_k$ is equivalent, therefore, $\\frac{p_k^*(i)}{p_0^*(i)}$ is a constant for $k \\in [K]$ , concluding our proof.", + "bbox": [ + 109, + 618, + 885, + 657 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C.5 Technical Lemma", + "text_level": 1, + "bbox": [ + 112, + 674, + 313, + 690 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Lemma C.9. For $x\\in \\mathbb{R}$ $|x| < C$ , it holds that", + "bbox": [ + 111, + 704, + 455, + 720 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\exp (x) > 1 + x + A _ {C} x ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 728, + 593, + 747 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "here $A_{C} = \\frac{\\exp(-C) + C - 1}{C^{2}}$", + "bbox": [ + 112, + 753, + 302, + 777 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Proof. 
Define $g(x) = \\frac{\\exp(x) - 1 - x}{x^2}$ , this function monotonically increases when $x < 0$ .", + "bbox": [ + 111, + 792, + 728, + 814 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 429, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 941, + 506, + 954 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "D Open-Thoughts Evaluation", + "text_level": 1, + "bbox": [ + 112, + 95, + 447, + 116 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "We finetune Qwen2.5-7B-Instruct over OpenThoughts-114k for 5 epochs using BF16 and AdamW and hyperparameters lr=1e-5, bs=128, warmup=150 steps. We sample 40 reasoning traces with temperature set to 0.7 for each of the 30 problems in AIME24. Then we evaluate the following quantities.", + "bbox": [ + 109, + 131, + 883, + 179 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/73d34b55c39b755f68e6950c8eebdf29d21cb222617fda1fe97a55a0270a9208.jpg", + "image_caption": [ + "Competition Math (AIME24)" + ], + "image_footnote": [], + "bbox": [ + 194, + 217, + 395, + 367 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/5aa59d6e2aba258b1051ce25b0904b17cd2e7490d5e852f24f61c5460a902111.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 400, + 217, + 598, + 366 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/b920292dcecb0dc0a1bc2d82f9a4af452fedf7363369eff449048c35d2f2d1b2.jpg", + "image_caption": [ + "Figure 10: Pass@K Evaluated on AIME24 over OpenThoughts-114K SFT checkpoints. We plot the expected Pass@K ± SD. Note that improvements in Pass@K slows down while Pass@1 improves at a constant rate. Furthermore, the confidence interval of Pass@1 widens, meaning the variance increases during SFT." 
+ ], + "image_footnote": [], + "bbox": [ + 604, + 217, + 803, + 367 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/5667e64bdc969d25573bf150b13c2046ca2168b091045a68f3bfe49c56c2ea9e.jpg", + "image_caption": [ + "Figure 11: Histogram of Pass@1 over AIME24. Variance of Pass@1 increases over finetuning on OpenThoughts-114K. We note that since AIME24 only has 30 questions, the density plot may not be completely reliable." + ], + "image_footnote": [], + "bbox": [ + 117, + 449, + 267, + 561 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/ce0b365e2d0cfdbc6033585c43605d8fc9a10bbe002ac2b4022da555660ffbd8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 271, + 449, + 421, + 561 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/a68455b7785772599da90654e44c982cadd4ddd78f610dab8af5816a70e0c43c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 424, + 450, + 575, + 560 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/25ebeb290d7259127374c9c05e2e0ce5075d7b65dfef8be8699ed49268e3b01a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 578, + 450, + 727, + 560 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/3eb53d8c924bb92658ec5635c7b2ae63fb3273756bce235deb3ec5596f40bd57.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 733, + 450, + 880, + 560 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/bb1d37404a819e2ad64f45cefe623549253879023f3bf030ffe2894bde9a30af.jpg", + "image_caption": [ + "Figure 12: We plot the average number of unique answers sampled over the total number samples i.e. $\\left|\\left\\{y_{i}\\right\\}_{i=1}^{n}\\right| / n$ . Model samples less diverse number of answers as SFT progresses." 
+ ], + "image_footnote": [], + "bbox": [ + 271, + 643, + 723, + 832 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 431, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 941, + 508, + 954 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "E Interpolation Coefficients", + "text_level": 1, + "bbox": [ + 112, + 94, + 429, + 116 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/408e81cdfc67395009c30c39aad9c4a31fa71dc028099dd11d2d322e044cc302.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 117, + 155, + 292, + 281 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/5cc7d3e6e8dc1b3da5bcfeaac5e13c685f56aae0ab567a53aa222a84743494fd.jpg", + "image_caption": [ + "WiSE-Step672 on MATH500" + ], + "image_footnote": [], + "bbox": [ + 305, + 154, + 480, + 280 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/b592a7e7ccb7f34cab0c0fe80f8b46299a170f5e015417c6a260678ca37e45d5.jpg", + "image_caption": [ + "WiSE-Step672 on MATH500", + "WiSE-Step672 on MATH500" + ], + "image_footnote": [], + "bbox": [ + 493, + 152, + 666, + 280 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/e15bf9634190a320923c26e9e93036be5f32c027f6a9256280b04a259db1412f.jpg", + "image_caption": [ + "WiSE-Step672 on MATH500" + ], + "image_footnote": [], + "bbox": [ + 679, + 154, + 854, + 280 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/a8fef11dae9deb43d79b665e01086d9767e0568d82dec02ac4133daecf6e053a.jpg", + "image_caption": [ + "WiSE-Step896 on MATH500" + ], + "image_footnote": [], + "bbox": [ + 117, + 297, + 292, + 424 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/b5a0036c7749a83fbea586e2ad026ccfe84a4e95cb087bdcfd3c7171f8cbc3f2.jpg", + "image_caption": [ + "WiSE-Step896 on MATH500" + ], + "image_footnote": [], + 
"bbox": [ + 305, + 297, + 480, + 424 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/e0ef05e879f2cabc6cc375cd5dfc6c14399c3e8c637b35a2bdd2dcd0382c64de.jpg", + "image_caption": [ + "WiSE-Step896 on MATH500" + ], + "image_footnote": [], + "bbox": [ + 493, + 297, + 666, + 424 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/cd383a929ba7e9a9865537886d40e7be10298f21f2a4eee59bb64f1c03895e08.jpg", + "image_caption": [ + "WiSE-Step896 on MATH500" + ], + "image_footnote": [], + "bbox": [ + 679, + 297, + 854, + 424 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/3d13f320227abed29d629f8ace04823e9059603f82a24fb7c3c0d25ac51e5eb4.jpg", + "image_caption": [ + "WiSE-Step1120 on MATH500", + "Figure 13: Pass@1 versus Pass@K of WiSEFT of Qwen-2.5-0.5B trained and evaluated on MATH500. We interpolate between model $\\pmb{w}_0$ at Step 112 with $\\pmb{w}_t$ for $t\\in [672,896,1120]$ as $\\delta \\pmb{w}_0 + (1 - \\delta)\\pmb{w}_t$ where $\\delta \\in [0.1,0.9]$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 117, + 441, + 297, + 568 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/d05652d4c9f3cc0229b825169a87f0a25576d63a99afa799f8969368cab3b996.jpg", + "image_caption": [ + "WiSE-Step1120 on MATH500" + ], + "image_footnote": [], + "bbox": [ + 305, + 441, + 486, + 568 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/a79e505ca270a42946351e05193fde44a40c51342de249abe1125406caef19ef.jpg", + "image_caption": [ + "WiSE-Step1120 on MATH500" + ], + "image_footnote": [], + "bbox": [ + 493, + 441, + 673, + 568 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/4970e2bbeb01a58d65034f7eee2d971620f82a124a24f05a15a0868fd8060784.jpg", + "image_caption": [ + "WiSE-Step1120 on MATH500" + ], + "image_footnote": [], + "bbox": [ + 679, + 441, + 859, + 568 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 941, + 506, + 953 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/6415bf26abc61b3f3704f1e93f285ad0c1d640bbb79a1978e63f250e5f01c217.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 116, + 111, + 313, + 253 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/cee9d9f66f1ba05e20958f6280cd378d5414554e40cbe63513148f4d200be612.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 316, + 111, + 500, + 253 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/7e11ff93e0b785d9953e3d89bcd12390815907a6dc2f3d35716009e51d01b2a9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 111, + 686, + 253 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/e2a337f3f3b316e21bd15dad207ede3c32034de3208f9ee58d3e8e8a316f2a94.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 
681, + 111, + 872, + 253 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/c5f007038e4a8539395358cfc078786e75a2500ccebd39ea8dcc0d9f3edd38c9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 122, + 257, + 305, + 398 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/8ab320b8b9b148e7007304b25bcea6941eee571cdcfb9f2bcb270cc915f41d2e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 310, + 258, + 491, + 398 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/534a1915e7237bbe8eb59cd3e2becaa163ccc90f8b40a09cb87b02ff2f834f83.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 258, + 679, + 398 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/88a2aea4db6f0ceb07a3f515c45521b42aef420591197fc4ac31c9e750a6eb8c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 686, + 258, + 867, + 398 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/d98f4a0d11c754910a2b645db67fdc99936a484cb60dcd83a0344a2437fc161b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 404, + 299, + 546 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/5186f02cf14107fb5f45cd06a48b8a84b8d148a57fc1f02fb24e57f2de6f0b2c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 310, + 404, + 486, + 546 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/6879e45cb82d6e4e3cb71ac588c9076b39549c4f56feda977179787fe237fef8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 404, + 687, + 546 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/7c463a6b643c401de38db5cb6513d86e528960fd75d924839fc2a365a4fe7d82.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 686, + 404, + 877, + 546 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/210f985ab8344ab9024b0e8866757143d08c6a402d3b38801bfe06bad03d0471.jpg", 
+ "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 551, + 299, + 691 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/97b56b7e63b48c137ae6e876983d6e3b63f26d408263648812f7955049209453.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 310, + 551, + 486, + 691 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/6583cb61c58420a22e2b481d3ecd6ab5badcb2086aeae20b3945609becb32131.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 551, + 674, + 691 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/76cd3796b40caac6ff8a02aec6cb5d704946a539355c961c1da0cde04fcc0f3b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 686, + 551, + 877, + 691 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/9b0b5f9803b94a0b5f4b2b333482b5acab14a7b4396c642f98d4c63f944bbd16.jpg", + "image_caption": [ + "Figure 14: Pass@1 versus Pass@K of WiSEFT of Gemma-2-2B trained and evaluated on GSM8K. We interpolate between model $\\pmb{w}_0$ at Step 171 with $\\pmb{w}_t$ for $t \\in [342, 684, 1026, 1368, 1710]$ as $\\delta \\pmb{w}_0 + (1 - \\delta) \\pmb{w}_t$ where $\\delta \\in [0.05, 0.9]$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 124, + 696, + 299, + 839 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/4c43efb60dc7fd63fa7e0bf6fe8153abec532160cb2af6bb071e080da392c5f2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 310, + 696, + 486, + 839 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/ad7003e3f0b25d46193c9fffc98906a772a124775a56cd8b98a55940824b50a2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 696, + 674, + 839 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/b1061168966769f0a706a6cdefd0ca7df762aaf6f5cf2e53465bfdddcf6d261d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 686, + 696, + 877, + 839 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 941, + 508, + 954 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "F Measuring Diversity of Traces", + "text_level": 1, + "bbox": [ + 112, + 95, + 473, + 116 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "We measure the diversity of the 100 sampled traces of Gemma-2-2B across GSM8k test. We measure diversity in terms of 3 different measures.", + "bbox": [ + 109, + 131, + 883, + 162 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Output Diversity The cardinality or number of unique answers in the set of all model outputs $\\left|\\{\\hat{y}_1,\\hat{y}_2,\\dots ,\\hat{y}_n\\}\\right|$ over the total number of traces.", + "bbox": [ + 111, + 175, + 883, + 207 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Operation Diversity In GSM8k, each intermediate step consists of basic arithmetic operations, e.g. $5 + 3 = 8$ . We may simply map each of the traces to the sequence of arithmetic operations the model steps through, i.e. 
$r_i \\rightarrow [o_1, o_2, \\ldots, o_t]$ . This mapping is extracted by code. Then, given this set, we measure unique sequence of operations over the number of total traces.", + "bbox": [ + 112, + 210, + 885, + 272 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Semantic Diversity We measure the similarity of trace using cosine similarities between the text-embeddings (Bilmes, 2022; Yu et al., 2023).", + "bbox": [ + 111, + 275, + 885, + 308 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "F.1 Does temperature increase diversity?", + "text_level": 1, + "bbox": [ + 112, + 321, + 480, + 339 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Temperature does increase diversity, but it also increases the chances of sampling outlier answers.", + "bbox": [ + 111, + 349, + 830, + 367 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 431, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 941, + 508, + 954 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/e5ccfdef5708eae2cdd50fb2c0053f33997475088945e324b1799671240c70ec.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 127, + 237, + 326, + 383 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/6505c814dcb35aa9757d8be7050bd89319b4a0aae6db8d965dfe1bf81985e105.jpg", + "image_caption": [ + "Diversity Across SFT [T=1.0]" + ], + "image_footnote": [], + "bbox": [ + 338, + 237, + 540, + 383 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/9edcca763df460a84257bc718bc153f2f8f5a4c6a79bbf78ad4b859738b6d86e.jpg", + "image_caption": [ + "Diversity Across SFT [T=0.8]" + ], + "image_footnote": [], + "bbox": [ + 549, + 237, + 750, + 383 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/6357c2ac829597a5cabbd3f6f2c13751aeaac97b2934f7e1144f19fda33b7246.jpg", + "image_caption": 
[], + "image_footnote": [], + "bbox": [ + 756, + 227, + 864, + 369 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/7ad9bd5e57c1c23dbd6a2fe73d568d72272de9cf3ff807b8a8da06b7e3ec8421.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 127, + 421, + 326, + 565 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/c03ad321d119e163d7d652bd75fdd1be669582b2e192edd3f0c824e236fa1553.jpg", + "image_caption": [ + "Diversity Across SFT [T=1.5]" + ], + "image_footnote": [], + "bbox": [ + 339, + 421, + 542, + 565 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/946d993abc5cb2f9296711897d897572593e58f08517be74488fe99906faa457.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 550, + 421, + 750, + 565 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/4cca06e5c218704714c26e86a6ac995e438bcee795019154e74edbfe68091d0f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 756, + 411, + 864, + 553 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/6c18e77dd874f11856943ca580d37e31d9687c5a7be4bc53d7a94208e6f4079a.jpg", + "image_caption": [ + "Figure 15: Diversity of traces sampled with Temperature $\\in$ {0.8, 1.0, 1.5} for Gemma-2-2B SFT checkpoints on GSM8k" + ], + "image_footnote": [], + "bbox": [ + 127, + 604, + 326, + 750 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/445fd20e4c1373f0114643f0a40149f387735f7d61bc9ab734ce91f06f149ec6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 604, + 540, + 750 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/c20602e164d649b2cede3ffcbe6d6d45d34e0fbfdb8701a97c0a3495fe22a13a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 550, + 597, + 750, + 750 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/04a8123274c3fe4a7a0f718441ab029fcec04c65de463ce4d5f4f8ddb1113e96.jpg", + "image_caption": [], + 
"image_footnote": [], + "bbox": [ + 756, + 595, + 864, + 736 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 941, + 508, + 953 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "F.2 How well do token-level diverse decoding strategies compare with optimal strategy with oracle?", + "text_level": 1, + "bbox": [ + 111, + 95, + 883, + 130 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Hyperparameter Tuning Details We grid search for optimal temperature for all baselines over $T = [0.8, 1.0, 1.2, 1.5, 1.8]$ . For nucleus, we choose the best cutoff threshold between $[0.8, 0.9, 0.95]$ . For min-p, we choose the best probability threshold between $[0.01, 0.05, 0.1]$ . For tokenwise top-k, we choose best k between $[12, 25, 50]$ .", + "bbox": [ + 109, + 142, + 885, + 205 + ], + "page_idx": 26 + }, + { + "type": "table", + "img_path": "images/c826bd5a0ec5354340e0780dded03d7ef2d94af5b3717c2da25e8b37e585943e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Decoding StrategyPass@2Pass@4Pass@8
Naive0.5650.6660.760
Nucleus0.5660.6680.757
Min-p0.5660.6680.760
Top-k0.5630.6660.756
Top-k w/Oracle0.7600.8320.901
", + "bbox": [ + 290, + 215, + 707, + 325 + ], + "page_idx": 26 + }, + { + "type": "table", + "img_path": "images/68f30ce20b9b540bd97d34e0e8afd1dd50e712a9365bf753e3341f92825fd290.jpg", + "table_caption": [ + "Table 2: Best Pass@k of Sampling Strategies for Qwen-2.5-0.5B over SFT checkpoints" + ], + "table_footnote": [], + "table_body": "
Decoding StrategyPass@2Pass@4Pass@8
Naive0.5470.6480.737
Nucleus0.5280.6170.694
Min-p0.5500.6550.744
Top-k0.5380.6460.738
Top-k w/Oracle0.7300.8140.878
", + "bbox": [ + 290, + 378, + 707, + 487 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Table 3: Pass@k of Sampling Strategies for Qwen-2.5-0.5B at Last SFT Checkpoint", + "bbox": [ + 192, + 496, + 803, + 513 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/c6ea5a13b67d1c489b5482fd1dc7e9c590db2bda35acfbb90c820e3cee9fbbba.jpg", + "image_caption": [ + "Figure 16: Pass@K over different Min-P thresholds $\\gamma \\in [0,0.3]$ and temperatures $T\\in [1,1.6]$ for Gemma2-2B finetuned on GSM8K. Generally, no min-p threshold paired with high temperature $\\mathrm{T} = 1.6$ (in light green) is able to surpass the Pass@1 of $\\mathrm{T} = 1$ with best min-p threshold (in orange). In other words, unlike WiSE-FT which increases both Pass@1 and Pass@K, Pass@1 tends to still decrease for the diverse decoding strategy of applying min-p with high temperature." + ], + "image_footnote": [], + "bbox": [ + 117, + 537, + 877, + 696 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 431, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 941, + 506, + 954 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/a63859f2d49730f3fc594d38e64daee690ae73ff3dfdfd4802371d22024209fe.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 117, + 95, + 305, + 237 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/e2ab7f724e76325f770f6dd199c9afc3e5801a548582d0e9ac5362497292c00e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 308, + 95, + 496, + 236 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/528a40964d3bfda6c2aef41d322a1d87b86f248ba95412cafdaaf3724d5c8979.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 95, + 689, + 236 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": 
"images/510bcdc6999edfdaf5ad51d42c4d837bb3ee069afcc24f8a26bd6ee5bc71c4e7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 699, + 95, + 880, + 236 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/39f305f282f3aa2b8d837c2869699fbd2bb84a64fa117b53ed57a515d2e954a8.jpg", + "image_caption": [ + "Figure 17: Pass@k of Gemma-2-2B GSM8k Naive Sampling with Replacement" + ], + "image_footnote": [], + "bbox": [ + 349, + 244, + 740, + 262 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/393c4e789004c9b1d25cabc33e6caa251b9a30ac8408881cef3e9408d407b11f.jpg", + "image_caption": [ + "Figure 18: Pass@k of Gemma-2-2B GSM8k Oracle Top K Sampling" + ], + "image_footnote": [], + "bbox": [ + 117, + 306, + 310, + 450 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/746a4c2b336ab6d80695d60f5f6112ea89a75779153cb6d50e8f7c5219e462ab.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 313, + 306, + 500, + 450 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/1eb6b1fd37997ea936e23fe598e2d89e0cb3ea24361199d8c518915d5a76ffb0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 308, + 689, + 450 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/d3bd9e86127b96e78ed2a923eb025ecabea92613c76f789c22579bde4d166df7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 694, + 308, + 880, + 450 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/a3080dc079b0901c51c02399a1907b2e2c96b0a922a54f5a446ed2ca4860e645.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 338, + 460, + 725, + 477 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/07caf13e3b113102c5e105ffade1b5592d6b69e20a12f6d7be497607fbc435bf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 117, + 532, + 305, + 684 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": 
"images/fc1e852587bf0d908433faf87683b609d90596c02a702dd55504a960a15c609c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 308, + 532, + 496, + 683 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/9e6d23e6964203140d9a54ab2c01c778ff9fc0a06702b556276de27f651dd276.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 532, + 689, + 683 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/010ca52fb4d6d2f4a74f158e3d4b742d0d60ff44e3344acc665ea390dcd0d87d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 689, + 532, + 880, + 683 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/34da879b88c056d54890866c39e610bb90307c00176ff655d5e91b80505a2801.jpg", + "image_caption": [ + "Figure 19: Pass@k of Qwen-2.5-0.5B GSM8k Naive Sampling with Replacement" + ], + "image_footnote": [], + "bbox": [ + 316, + 696, + 707, + 712 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "F.3 Diversity Comparison Between SFT and WiSE-FT", + "text_level": 1, + "bbox": [ + 111, + 766, + 589, + 784 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 941, + 508, + 954 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/056234a694a700f1c7f01a6e9ed20094d7cd0fdb69eb5de2b5a653d849348ef9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 117, + 152, + 310, + 303 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/8a3d27f0b455df3c98d77c12e7837d76fbf9b29395a7db957519d3cc75142e50.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 313, + 152, + 500, + 303 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/db799a546fac2266a4fb6e884a368cf79f696cc523c048ac9f7712be215438ae.jpg", + "image_caption": [], + 
"image_footnote": [], + "bbox": [ + 500, + 152, + 691, + 303 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/f3af3a04795cd7ec6e134e72ceb0fd20ab57c4f2f3d29acda347037690cecaa8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 691, + 154, + 880, + 303 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/335ad53fb8c267ab9f4d31675ae0ac9c056c01642d53698f0a201571736eb81f.jpg", + "image_caption": [ + "Figure 20: Pass@k of Qwen-2.5-0.5B GSM8k Oracle Top K Sampling" + ], + "image_footnote": [], + "bbox": [ + 323, + 315, + 709, + 333 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/ccfab5cfd7bae8ef782e079cda3aeda4adbe6f786122fc77799b75ec40133ee2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 488, + 815, + 636 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/5c78239b846481b7bf9c97f9b381676efebb712f06044215ffc0828a1d3181dd.jpg", + "image_caption": [ + "Figure 21: Operation, Semantic, and Answer Diversity of Gemma-2-2B checkpoints of SFT over GSM8K versus the corresponding WiSE-FT variants (with the earliest checkpoint). We decode with temperature set to 1.0." + ], + "image_footnote": [], + "bbox": [ + 181, + 643, + 815, + 792 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 941, + 508, + 954 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/4546a62db2a2c93d5719df6b62375b8b3d804a1758924ed0422076a662d1a358.jpg", + "image_caption": [ + "Figure 22: Operation, Semantic, and Answer Diversity of Gemma-2-2B checkpoints of SFT over GSM8K versus the corresponding WiSE-FT variants (with the earliest checkpoint). We decode with temperature set to 1.6." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 318, + 818, + 625 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 490, + 941, + 508, + 954 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "G Best of K Evaluation", + "text_level": 1, + "bbox": [ + 112, + 95, + 377, + 113 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/9e3fdc034bc19b8587b4804417cfbed97b363f7d3658230fe4584772068195a9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 114, + 138, + 305, + 242 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/b44675a6666bda0b73ef9e200a9b1d1022ee221c3d4530367974a9e78cee2014.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 308, + 140, + 496, + 242 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/aeb10270cd0a472e542fdfa57315bbbb7b5ba41555291cba501c05e400478d37.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 140, + 689, + 242 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/b2d2e4fd5b348100cefd256746110d2df91d3244a48b747cbf5561ad703f3c6e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 692, + 140, + 880, + 242 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/ec5aabdb8afc02d520db433b2ed880cfb460293ab4f514cf1cd416a0e05b8f01.jpg", + "image_caption": [ + "Figure 23: Best@K performance on MATH500 with ORM verifier, comparing different SFT and WiSE-FT checkpoints of Qwen-2.5-0.5B for $K = 2,4,8,32$" + ], + "image_footnote": [], + "bbox": [ + 380, + 247, + 616, + 268 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/814e37e4a8cfad542f088af46bbd948f458f6686fdea387a041191635dae743d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 116, + 335, + 377, + 489 + ], + 
"page_idx": 30 + }, + { + "type": "image", + "img_path": "images/dcc8297332fec4d4f444504910cf3c945a0afaf6ec9a11be41dfb59a185c4df3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 380, + 335, + 629, + 489 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/04d3dcc21b958936bdc9a1f91c07188c307c9efefc4ccbe3518792514baf7514.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 633, + 335, + 880, + 489 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/568239dfe0d053038302fc7fbde2c6756bc3312c7e80cec28090883679638505.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 500, + 643, + 527 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/860ad658417f481695762b3985aa86cc6c8648a27a1974383b58f81738a321ca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 116, + 531, + 372, + 684 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/b0c17f52c3d8d9f710084b2e71cd83327cb690eb5a0d9b91600ddb7e86032d53.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 383, + 531, + 625, + 684 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/81519a54b17fc6c666684e274696d0ac2127e9fe4ff88d0d9326a87c6149a208.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 638, + 531, + 879, + 684 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/205af94455736f91afbef56faed86d6536d4884d2490f957f228fe6f9d772c60.jpg", + "image_caption": [ + "Figure 24: Best@K performance on MATH500 with ORM (Top) and Majority Vote (Bottom) for early, middle, and late SFT checkpoints and WiSE-FT counterparts, showing Qwen-2.5-0.5B's scaling across K values." 
+ ], + "image_footnote": [], + "bbox": [ + 346, + 696, + 645, + 722 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "H Diversity Collapse and WiSE-FT Results for the Coding Task", + "text_level": 1, + "bbox": [ + 111, + 805, + 803, + 825 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "To test whether coding tasks exhibit the same diversity collapse observed in reasoning benchmarks, we fine-tuned the Qwen2.5-coder-0.5B model for 10 epochs on the Magicoder-Evol-Instruct-110K dataset, following the Stage 2 SFT recipe from OpenCoder LLM. We then applied WiSE-FT by interpolating the weights of the second SFT checkpoint with the initial model using interpolation ratio 0.5. Both the original SFT checkpoints and their WiSE-FT counterparts were evaluated on HumanEval for pass@k.", + "bbox": [ + 109, + 840, + 885, + 919 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 112, + 32, + 431, + 47 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 941, + 506, + 954 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/60a8b9a9af69b7127d68729341fe2361371efa2b3e4bacb8eec3f42fc9ec84a0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 114, + 94, + 302, + 199 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/9dc1172db134cef8ae1e854cc6f68ebfe8e6c96aaeacb27c5b0c870ad9752a67.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 307, + 94, + 496, + 199 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/cafeaf2d7bb4404026a1ba3699040624b79be50df113c5458aaa97d41afc6c76.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 95, + 687, + 199 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/1e7d7a28f9f5517750d4bcda26579c66a321c8f7a9d9072a3c097f819faa0084.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 694, + 
95, + 880, + 199 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/d837c799a3424c44d929865ee8c5c17f2da827b7e87b44e7380e31803ce3e9b3.jpg", + "image_caption": [ + "Figure 25: Best@K performance on MATH500 with majority voting, comparing different SFT and WiSE-FT checkpoints of Qwen-2.5-0.5B for $K = 2, 4, 8, 32$" + ], + "image_footnote": [], + "bbox": [ + 377, + 204, + 614, + 224 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "We found that, much like in mathematical reasoning tasks, SFT on coding data indeed suffers from diversity collapse: although pass@1 steadily improves over epochs, pass@k begins to deteriorate. And WiSE-FT still improves performance and mitigates the diversity collapse.", + "bbox": [ + 109, + 294, + 883, + 342 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/cb3ca09225ec7f2708663a63adf1bb7d1807c672a937ff5625e5894c7f467191.jpg", + "image_caption": [ + "HumanEval - Pass@k Across SFT Checkpoints" + ], + "image_footnote": [], + "bbox": [ + 117, + 376, + 364, + 518 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/293bfc0fc27150b8eec52e2f179c7a8bee6d05de89eb6f954bb89407cf535e84.jpg", + "image_caption": [ + "Figure 26: Pass@K performance of SFT checkpoints on HumanEval (temperature = 1.0)." 
+ ], + "image_footnote": [], + "bbox": [ + 372, + 377, + 620, + 518 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/dc80c75a67d472177dcc63f0074f37ec267e332b7e7317accb544868294cd22e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 632, + 378, + 879, + 518 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/aa8a4b4c00fe197e0575e11b2fecb88604a4562cbe6fb459230d83e7172326d4.jpg", + "image_caption": [ + "HumanEval - Pass@k Across Checkpoints (SFT vs WiSE-FT)" + ], + "image_footnote": [], + "bbox": [ + 117, + 593, + 364, + 734 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/6f01ccaf5c53f26ffda8ffe2a095592c21beb991d051e6b3ecc692744b009663.jpg", + "image_caption": [ + "Figure 27: Comparison of pass@K for SFT checkpoints and their WiSE-FT counterparts at $k = 1$ , 16, 64." + ], + "image_footnote": [], + "bbox": [ + 374, + 593, + 620, + 734 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/adce33339af950e6097d8809ac7898c79d536d03de13d3b42c8e47246adb4ef3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 632, + 594, + 877, + 734 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 941, + 508, + 954 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/b5e95e0338cedd9b1f43533a5dbb16642aa6e174015a5f1dd91e1e8a66e77aa3.jpg", + "image_caption": [ + "HumanEval - Last Checkpoint (1700) Comparison: SFT vs WiSE-FT", + "Figure 28: Pass@K performance of the final SFT checkpoint versus its WiSE-FT variant." 
+ ], + "image_footnote": [], + "bbox": [ + 119, + 349, + 867, + 657 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 114, + 32, + 431, + 47 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 488, + 941, + 508, + 954 + ], + "page_idx": 32 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_model.json b/data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4a68d66b94f39ea6f857f91a55601abc4eb3e71a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_model.json @@ -0,0 +1,6096 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.435, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.097, + 0.658, + 0.151 + ], + "angle": 0, + "content": "Weight Ensembling Improves Reasoning in Language Models" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.157, + 0.841, + 0.175 + ], + "angle": 0, + "content": "Xingyu Dang\\*,1 Christina Baek\\*,2 Kaiyue Wen3 Zico Kolter2 Aditi Raghunathan2" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.179, + 0.692, + 0.198 + ], + "angle": 0, + "content": "\\(^{1}\\) Tsinghua University \\(^{2}\\) Carnegie Mellon University \\(^{3}\\) Stanford University" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.209, + 0.566, + 0.224 + ], + "angle": 0, + "content": "\\(\\text{品}\\) dangxy20@mails.tsinghua.edu.cn,kbaek@andrew.cmu.edu" + }, + { + "type": "title", + "bbox": [ + 0.459, + 0.261, + 0.542, + 0.278 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.299, + 0.832, + 0.496 + ], + "angle": 0, + "content": "We investigate a failure mode that arises during the training of reasoning models, where the 
diversity of generations begins to collapse, leading to suboptimal test-time scaling. Notably, the Pass@1 rate reliably improves during supervised finetuning (SFT), but Pass@k rapidly deteriorates. Surprisingly, a simple intervention of interpolating the weights of the latest SFT checkpoint with an early checkpoint, otherwise known as WiSE-FT, almost completely recovers Pass@k while also improving Pass@1. The WiSE-FT variant achieves better test-time scaling (Best@k, majority vote) and achieves superior results with less data when tuned further by reinforcement learning. Finally, we find that WiSE-FT provides complementary performance gains that cannot be achieved only through diversity-inducing decoding strategies, like temperature scaling. We formalize a bias-variance tradeoff of Pass@k with respect to the expectation and variance of Pass@1 over the test distribution. We find that WiSE-FT can reduce bias and variance simultaneously, while temperature scaling inherently trades off between bias and variance." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.528, + 0.291, + 0.546 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.57, + 0.888, + 0.709 + ], + "angle": 0, + "content": "Recent advances in large language models (LLMs) have showcased their remarkable ability to perform complex reasoning, yet these successes often hinge on test-time scaling strategies (Lightman et al., 2023; Snell et al., 2024; Wu et al., 2024). In many applications, such as math problems, puzzles, and logical reasoning, LLMs employ a verification framework where it is significantly easier for the model to verify a candidate solution than to generate one from scratch. This distinction has given rise to strategies that sample multiple \"reasoning traces\" or sequences of reasoning steps during inference, selecting the best final guess through an outcome reward model (ORM) or majority vote. 
In this setting, an upper bound on the performance a model could achieve is measured by Pass@K, or the probability that at least one out of \\( K \\) independently sampled reasoning traces is correct." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.713, + 0.886, + 0.868 + ], + "angle": 0, + "content": "Unfortunately, while the standard training pipeline of supervised finetuning (SFT) followed by reinforcement learning (RL) dependably improves Pass@1 for reasoning, Pass@K tends to drop early into finetuning (Cobbe et al., 2021; Chow et al., 2024a; Chen et al., 2025). This mismatch arises from a symptom of finetuning called diversity collapse, where overtuned models yield less diverse generations. This is detrimental to Pass@K since the model wastes \\( K \\) attempts on only a handful of guesses. In fact, by analyzing the model's error rate i.e., 1 - Pass@1, across the test distribution, we derive a Pass@K bias-variance trade-off. To improve expected test Pass@K, one can either reduce the bias which is the expected error rate or how much the model's error rate varies across problems. The latter term is connected to diversity - more diversity allows models to hedge and do uniformly well across all test questions. In particular, during SFT, Pass@1 improves (bias ↓) at the cost of diversity collapse (variance ↑)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.872, + 0.886, + 0.921 + ], + "angle": 0, + "content": "Surprisingly, common ways of alleviating diversity collapse, such as early stopping at peak Pass@K or decoding with high temperature, suffer from the reverse trade-off: diversity improves (variance \\(\\downarrow\\)) at the cost of overall Pass@1 degrading (bias \\(\\uparrow\\)). 
Consequently, in this paper we are concerned with a central question:" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.285, + 0.059, + 0.717 + ], + "angle": 270, + "content": "arXiv:2504.10478v4 [cs.LG] 7 Oct 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.944, + 0.506, + 0.955 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.132, + 0.098, + 0.368, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.099, + 0.619, + 0.286 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.624, + 0.099, + 0.868, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.16, + 0.291, + 0.836, + 0.309 + ], + "angle": 0, + "content": "--- SFT T=0.7 --- SFT T=1.0 WiSE-FT T=1.0 SFT T=1.3 SFT T=1.6" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.326, + 0.885, + 0.39 + ], + "angle": 0, + "content": "Figure 1: Pass@k of WiSE-FT versus SFT on GSM8k Gemma-2-2B supervised finetuned and evaluated on GSM8k. At each SFT timestep \\( t \\), we evaluate Pass@k of checkpoint \\( w_{t} \\) (in dashed) with its WiSE-FT variant \\( 1/2 \\cdot w_{t} + 1/2 \\cdot w_{0} \\) (in solid), where traces are independently sampled with temperature \\( T = [0.7, 1.0, 1.3, 1.6] \\)." + }, + { + "type": "text", + "bbox": [ + 0.123, + 0.413, + 0.877, + 0.448 + ], + "angle": 0, + "content": "Is it possible to simultaneously improve both Pass@1 and Pass@K, thereby overcoming the bias-variance tradeoff inherent in current approaches?" 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.457, + 0.885, + 0.595 + ], + "angle": 0, + "content": "In our work, we introduce a simple, scalable and effective intervention that allows models to achieve both high Pass@K and Pass@1 across mathematical reasoning tasks GSM8k, MATH, and AIME. The specific technique we use is a variant of WiSE-FT (Wortsman et al., 2022) where we interpolate the weights of the latest SFT checkpoint \\( \\boldsymbol{w}_t \\) with an early checkpoint \\( w_0 \\) as \\( \\boldsymbol{w}_{\\mathrm{WiSE}(t)} = \\frac{1}{2} \\cdot \\boldsymbol{w}_0 + \\frac{1}{2} \\cdot \\boldsymbol{w}_t \\). Our key finding is that WiSE-FT successfully merges the diverse sampling capabilities of earlier checkpoints while retaining or surpassing the Pass@1 of later checkpoints. In Figure 1, we observe that the WiSE-FT model achieves both higher Pass@K and Pass@1 with more SFT steps \\( t \\), unlike naive SFT which suffers from an early decay in Pass@K. Moreover, the gains with WiSE-FT is unachievable by early-stopping or diversity-aware decoding alone." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.601, + 0.884, + 0.648 + ], + "angle": 0, + "content": "Thus, we propose a new paradigm of training reasoning models: 1.) Train extensively using SFT as long as Pass@1 improves, 2.) Perform WiSE-FT with an earlier SFT checkpoint, 3.) Continue tuning the WiSE-FT variant using RL. Overall, the WiSE-FT model has the following immediate practical benefits:" + }, + { + "type": "text", + "bbox": [ + 0.123, + 0.659, + 0.88, + 0.703 + ], + "angle": 0, + "content": "- Better Test-Time Scaling Across all datasets and base models, the WiSE-FT variant achieves the highest performance with test-time scaling (Majority Vote, ORM) compared to an overtrained SFT model paired with diversity-aware decoding." 
+ }, + { + "type": "text", + "bbox": [ + 0.123, + 0.709, + 0.88, + 0.783 + ], + "angle": 0, + "content": "- Better Reinforcement Learning Since RL uses self-generated data to tune models, to generalize reliably, it is important for generations to provide sufficient learning signal while also having high coverage over the data space. We find that continued RL training starting from WiSE-FT weights achieves superior results with less synthetic data compared to initializing RL from the last SFT checkpoint and even early-stopped SFT." + }, + { + "type": "list", + "bbox": [ + 0.123, + 0.659, + 0.88, + 0.783 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.797, + 0.885, + 0.92 + ], + "angle": 0, + "content": "In summary, we provide a comprehensive analysis of how reasoning models suffer from diversity collapse during SFT and its negative downstream impact during RL and test-time scaling. We first discuss our WiSE-FT findings in §4. Motivated by this discovery, we investigate two fundamental questions. First, we investigate diversity collapse during SFT and RL of reasoning models in §5. Diversity collapse not only impacts the model's ability to attempt different guesses. In fact, we make an even stronger observation - the generations of reasoning models converge towards a single reasoning trace for each test question. We theoretically prove that standard RL algorithms (i.e., REINFORCE and GRPO) fail to recover lost diversity in a simplified discrete bandit setting." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.506, + 0.955 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.097, + 0.885, + 0.19 + ], + "angle": 0, + "content": "Second, we formalize the competing goals of Pass@1 and Pass@K as a bias-variance trade-off in §6. We empirically measure and compare the bias and variance of WiSE-FT versus early-stopping versus high temperature decoding. Notably, only WiSE-FT reduces both bias and variance. We conclude with a remark on the limitations of decoding strategies such as top-k (Shao et al., 2017), nucleus (Holtzman et al., 2020), and min-p (Nguyen et al., 2024), at eliciting the maximum capabilities with test-time scaling from current reasoning models." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.204, + 0.312, + 0.223 + ], + "angle": 0, + "content": "2 Related Works" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.24, + 0.885, + 0.455 + ], + "angle": 0, + "content": "Diversity collapse with SFT: The standard pipeline for enhancing reasoning in LLMs involves an initial phase of supervised fine-tuning (SFT) followed by reinforcement learning (RL) (Guo et al., 2025; Setlur et al., 2024). SFT is critical for instilling interpretable and readable reasoning chains and ensuring that the model adheres to a consistent rollout templates (Guo et al., 2025). However, a number of recent works have identified critical pitfalls of SFT that hinders the model's ability to explore and ultimately it's overall problem solving ability. Notably, Cobbe et al. (2021) observe diversity collapse when finetuning on GSM8k training dataset, during which the Pass@1 continuously improves whereas Pass@k starts to fall shortly into the training. 
Similar diversity collapse phenomenon also exists in the self-improvement setting with SFT (Song et al., 2024), and is theoretically investigated as the sharpening effect (Huang et al., 2024). This is not desirable as diverse sampling at inference is important for test-time scaling using majority voting (Wang et al., 2023) or reward model guided search (Setlur et al., 2024; Beeching et al., 2024). Yeo et al. (2025); Chu et al. (2025) attribute this behavior to overfitting, memorization of samples and overfixation to a template style leading to reduced generalization. In our work, we corroborate similar findings and propose ensembling over the course of SFT as a mitigation strategy." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.477, + 0.885, + 0.691 + ], + "angle": 0, + "content": "Mitigating diversity collapse: Given the importance of diversity for effectively scaling inference-time compute, several recent works have proposed auxiliary finetuning objectives and decoding strategies to mitigate diversity collapse. Li et al. (2025) regularize the SFT process using a game-theoretic framework that encourages sparse updates, thereby preserving output diversity. Zhang et al. (2024b) directly optimizes for diversity during finetuning. Other approaches modify the finetuning procedure to directly optimize for Best-of-N sampling at inference time (Chow et al., 2024b; Sessa et al., 2024; Chen et al., 2025). Another line of work focuses on inference-time decoding, explicitly encouraging diverse solutions through modified beam search strategies (Vijayakumar et al., 2018; Olausson et al., 2024; Chen et al., 2024; Beeching et al., 2024). Li et al. (2023) improve diversity during parallel decoding by appending curated prompts to the input. In formal reasoning settings e.g., Lean, methods such as Monte Carlo tree search have been used to diversify intermediate reasoning steps, as demonstrated in AlphaProof (AlphaProof and AlphaGeometry teams, 2024). 
In this work, we identify a simple and complementary intervention during the finetuning process to maintain the diversity of generations. We especially care about enforcing diversity while preserving the overall accuracy of generations." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.705, + 0.564, + 0.726 + ], + "angle": 0, + "content": "3 Preliminaries and Experimental Setup" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.743, + 0.473, + 0.761 + ], + "angle": 0, + "content": "3.1 Pass@k, Best@k, and Majority Vote" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.772, + 0.885, + 0.837 + ], + "angle": 0, + "content": "Given a reasoning model \\( f(\\cdot) \\), a decoding strategy \\( D \\), and problem \\( x \\), the model's solution is obtained by sampling a reasoning trace \\( r := [x, s^{(1)}, s^{(2)}, \\dots, s^{(n)}, \\hat{y}] \\) consisting of a sequence of intermediate steps \\( s^{(i)} \\) and a final guess \\( \\hat{y} \\). Given \\( k \\) independently sampled traces, Pass@K measures the probability that at least one guess matches the true answer \\( y \\):" + }, + { + "type": "equation", + "bbox": [ + 0.227, + 0.842, + 0.887, + 0.866 + ], + "angle": 0, + "content": "\\[\n\\operatorname {P a s s} @ \\mathrm {K} (x) = \\mathbb {E} _ {[ \\boldsymbol {r} _ {i} ] _ {i = 1} ^ {k} \\sim D (f (x))} [ \\mathbb {1} \\{\\exists i \\in [ k ] \\text {s . t .} \\hat {y} _ {i} = y \\} ] = 1 - (1 - \\rho_ {x}) ^ {K} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.871, + 0.885, + 0.921 + ], + "angle": 0, + "content": "where \\(\\rho_{x} = P(\\hat{y} = y\\mid x,f,D)\\) is the Pass@1 or marginal probability of sampling the ground truth answer. Then \\((1 - \\rho_x)^K\\) is the probability that all \\(K\\) guesses are incorrect. We will refer to Pass@1 as \\(\\rho_{x}\\) interchangeably in our paper." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.506, + 0.955 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.097, + 0.884, + 0.128 + ], + "angle": 0, + "content": "In practice, test-time compute is scaled by selecting one of \\( K \\) guesses either by a output reward model (ORM) or Majority Vote. Then we can measure Best@K as" + }, + { + "type": "equation", + "bbox": [ + 0.166, + 0.139, + 0.831, + 0.185 + ], + "angle": 0, + "content": "\\[\n\\operatorname {B e s t} @ \\mathrm {K} (x) = \\mathbb {E} _ {[ \\boldsymbol {r} _ {i} ] _ {i = 1} ^ {k} \\sim D (f (x))} [ \\hat {y} _ {i ^ {*}} = y ] \\text {w h e r e} i ^ {*} = \\arg \\max _ {i \\in [ K ]} \\sum_ {j = 1} ^ {K} \\mathbb {1} \\left\\{\\hat {y} _ {i} = \\hat {y} _ {j} \\right\\} \\text {o r} \\operatorname {O R M} (\\boldsymbol {r} _ {i})\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.193, + 0.885, + 0.24 + ], + "angle": 0, + "content": "Notably, Pass@K is equivalent to Best@K using a perfect ORM verifier. As we will observe, WiSE-FT achieves both higher Pass@1 and Pass@K and this directly translates to achieving better Best@K with an ORM verifier and by Majority Vote." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.255, + 0.48, + 0.273 + ], + "angle": 0, + "content": "3.2 Weight-Space Ensembling (WiSE-FT)" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.285, + 0.884, + 0.361 + ], + "angle": 0, + "content": "WiSE-FT is a weight-space ensembling technique proposed by Wortzman et al. (2022) to improve the out-of-distribution accuracy of finetuned models at no extra computational cost. In particular, while models tend to achieve better in-distribution performance after finetuning, they tend to be less robust to distribution shift. 
Surprisingly, by simply interpolating the weights of the finetuned model \\( \\boldsymbol{w}_t \\) with the pretrained weights \\( \\boldsymbol{w}_0 \\)" + }, + { + "type": "equation", + "bbox": [ + 0.375, + 0.371, + 0.884, + 0.391 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {w} _ {\\mathrm {W i S E} (t)} = \\delta \\cdot \\boldsymbol {w} _ {0} + (1 - \\delta) \\cdot \\boldsymbol {w} _ {t} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.398, + 0.884, + 0.461 + ], + "angle": 0, + "content": "WiSE-FT can achieve best of both words: the out-of-distribution accuracy of models improves without incurring a drop in in-distribution accuracy. Similar to this philosophy, we apply weight ensembling to achieve both the diverse generation ability of early SFT checkpoints while maintaining the high Pass@1 accuracy of later SFT checkpoints." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.475, + 0.449, + 0.494 + ], + "angle": 0, + "content": "3.3 Training and Evaluation Pipeline" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.505, + 0.885, + 0.659 + ], + "angle": 0, + "content": "The majority of our experiments are conducted on Gemma-2-2B and Qwen-2.5-0.5B. We perform SFT on a 30K subset of rephrased augmentations of GSM8k (Cobbe et al., 2021) and MATH (Hendrycks et al., 2021) in MetaMath40k (Yu et al., 2023) for 1710 steps or 10 epochs. We then continue finetuning on another 30K subset of rephrased training questions from MetaMath using Group Relative Policy Optimization (GRPO) with a binary reward of the correctness of the model's final answer. Finally, we evaluate models on GSM8K and MATH500, respectively. To estimate the true Pass@K and Pass@1 marginalized over the distribution of sampled traces, we sample 100 reasoning traces per test example and average over them to estimate Pass@1, i.e. \\(\\rho_{x}\\). Then to calculate Pass@K, we use the theoretical formula \\(1 - (1 - \\rho_{x})^{K}\\) in Equation 1. 
Unless noted otherwise, we employ a naive decoding strategy with top-p threshold 0.9, temperature \\(T = 0.8\\), and top-k with \\(K = 50\\)." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.675, + 0.734, + 0.697 + ], + "angle": 0, + "content": "4 Improving Diverse Reasoning Capabilities by WiSE-FT" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.713, + 0.885, + 0.806 + ], + "angle": 0, + "content": "We first carefully track Pass@K for \\( K \\in \\{1, 4, 32\\} \\) across the SFT trajectory of Qwen-2.5-0.5B and Gemma-2-2B. Similar to findings from Cobbe et al. (2021); Chen et al. (2025), we observe that Pass@1 continues to improve with longer SFT, whereas for larger \\( K = 4, 32 \\), Pass@K tends to peak much earlier on in training (in Figure 1, 17, and 19). In other words, while later SFT checkpoints achieve higher Pass@1, earlier SFT checkpoint achieve higher Pass@K. This tradeoff in model selection is not ideal downstream for test-time scaling." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.812, + 0.884, + 0.922 + ], + "angle": 0, + "content": "Building upon this intuition, we propose weight ensembling between earlier and later SFT checkpoints. We apply a variant of WiSE-FT where instead of the pretrained model, we interpolate between the earliest SFT checkpoint (in our case, after 1 epoch of training) and the weights of later checkpoint. As shown in Figure 2, we observe a \"sweet spot\" of interpolation coefficients \\(\\delta \\in (0,1)\\) where the WiSE-FT model achieves both higher Pass@1 than the last SFT model and higher Pass@K than the early SFT model. We will fix \\(\\delta = 1/2\\), which generally performs decently for all of the datasets we've tested. In fact, after WiSE-FT \\(w_{\\mathrm{WiSE}(t)}\\), both Pass@1 and Pass@k grow monotonically with SFT steps \\(t\\) (see Figure 1)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.505, + 0.955 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.098, + 0.365, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.379, + 0.099, + 0.621, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.639, + 0.099, + 0.882, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.297, + 0.885, + 0.375 + ], + "angle": 0, + "content": "Figure 2: Pass@1 vs. Pass@K across Interpolation Coefficients We perform WiSEFT with \\(\\delta \\in [0.1, 0.9]\\) between the first and last checkpoints of model (in legend) finetuned on GSM8K, MATH, and OpenThoughts-114K, then evaluate on GSM8K, MATH500, and AIME24, respectively. Early SFT model observe higher Pass@K (y-axis) while later SFT model observes higher Pass@1 (x-axis). The interpolated model observe best of both metrics." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.406, + 0.885, + 0.513 + ], + "angle": 0, + "content": "Better Test-Time Scaling This boost in both Pass@1 and Pass@K directly translates to better performance with test-time scaling. We measure Best@K by Majority Vote and by selecting the reasoning trace with highest reward using an off-the-shelf ORM RLHFlow/Llama3.1-8B-PRM-Deepseek-Data (Xiong et al., 2024). We evaluate the performance of the last SFT checkpoint with highest Pass@1 versus the corresponding WiSE-FT variant with \\(\\delta = 1/2\\). In Figure 3, we see that the performance gap on MATH500 between the final Gemma-2-2B SFT checkpoint and Wise-FT model widens with larger \\(K\\). The WiSE-FT model achieves \\(5 - 7\\%\\) better performance with test-time scaling." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.551, + 0.885, + 0.718 + ], + "angle": 0, + "content": "Better RL Scaling WiSE-FT's ability to achieve both high Pass@1 and Pass@K is particularly advantageous for continued RL training where models are further trained by policy gradient methods using self-generated data. In particular, WiSE-FT is able to generate data rich in learning signal (high Pass@1) while still having high coverage over the data space (high Pass@K). We continue training on rephrased training questions of GSM8K and MATH using GRPO paired with a binary reward of the correctness of the final guess. Across runs, we observe that continued RL training starting from the final WiSE-FT model improves performance more stably than finetuning starting from the final SFT checkpoint. Notably the final SFT checkpoint suffers low coverage over the data space, causing Pass@1 to improve slowly. We also try continued RL training from an earlier SFT checkpoint with peak Pass@4 performance. While RL scales better over the early SFT checkpoint in comparison to the final checkpoint, the performance still remains subpar compared to WiSE-FT." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.736, + 0.468, + 0.755 + ], + "angle": 0, + "content": "4.1 General Purpose Reasoning Models" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.767, + 0.885, + 0.92 + ], + "angle": 0, + "content": "So far we have studied the effect of WiSE-FT on models tuned on reasoning data for the same specific reasoning task (e.g., train on GSM8k and evaluate on GSM8k). We've additionally tested how well our findings generalize to models trained on general purpose reasoning datasets and tested on a out-of-distribution reasoning task. 
We take Qwen2.5-7B-Instruct and SFT for 5 epochs on OpenThoughts-114k, a high-quality synthetic dataset of math, science, and coding questions paired with DeepSeek-R1 completions, then evaluate its performance on AIME24 competition problems (with ASY code for figures from Muennighoff et al. (2025)). In this setting, the Pass@K trends during SFT on is more subtle. We still observe diversity collapse in Figure 12, but the affect is not strong enough for Pass@K to drop back down. However, we observe that the rate at which Pass@K improves for \\( K \\in \\{16,32\\} \\) slows down early while Pass@1 grows at a constant rate (Figure 10). We then perform WiSE-FT between the final and earlier checkpoint with" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.506, + 0.955 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.097, + 0.338, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.098, + 0.552, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.562, + 0.1, + 0.872, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.324, + 0.292, + 0.347, + 0.307 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.31, + 0.553, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.324, + 0.525, + 0.347, + 0.541 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.293, + 0.878, + 0.513 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.711, + 0.523, + 0.731, + 0.538 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.551, + 0.887, + 0.645 + ], 
+ "angle": 0, + "content": "Figure 3: Downstream Advantages of WiSE-FT: (a) Best@K on MATH500 of the final SFT Gemma2-2B checkpoint and its WiSE-FT counterpart. (b) Pass@K on AIME24 WiSE-FT after SFT on general purpose reasoning dataset OpenThoughts-114k achieves higher Pass@K on AIME24. (c) RL Scaling Gemma and Qwen SFT checkpoints further tuned by GRPO on GSM8K and MATH, respectively. RL from the final WiSE-FT model achieves higher Pass@1 with less data compared to GRPO starting from both early and late SFT checkpoints." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.67, + 0.885, + 0.703 + ], + "angle": 0, + "content": "higher diversity. We choose early checkpoint at epoch 3 where improvements in Pass@K begin to slow. Similarly, we observe that WiSE-FT improves both Pass@1 and Pass@K in Figure 2." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.719, + 0.553, + 0.741 + ], + "angle": 0, + "content": "5 Diversity Collapse during Finetuning" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.755, + 0.885, + 0.803 + ], + "angle": 0, + "content": "In previous sections we alluded to the phenomenon where \\(\\mathrm{Pass}@\\mathrm{K}\\) decreases because SFT and RL induces diversity collapse in reasoning traces. To verify this hypothesis, we sample 100 traces per test GSM8k problem and measure diversity using three metrics:" + }, + { + "type": "text", + "bbox": [ + 0.151, + 0.814, + 0.751, + 0.832 + ], + "angle": 0, + "content": "1. Answer Diversity: The fraction of unique guesses \\(\\hat{y}\\) among reasoning traces." + }, + { + "type": "text", + "bbox": [ + 0.151, + 0.836, + 0.887, + 0.883 + ], + "angle": 0, + "content": "2. Operation Diversity: The fraction of unique sequence of arithmetic operations performed among reasoning traces (In GSM8k, each intermediate step consists of a basic arithmetic operation, e.g. \\( 5 + 3 = 8 \\))." + }, + { + "type": "text", + "bbox": [ + 0.151, + 0.887, + 0.885, + 0.92 + ], + "angle": 0, + "content": "3. 
Semantic Diversity: The average cosine similarity between the text embeddings of the reasoning traces, computed using Stella-400M-v5 (Zhang et al., 2024a)" + }, + { + "type": "list", + "bbox": [ + 0.151, + 0.814, + 0.887, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.944, + 0.506, + 0.955 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.402, + 0.097, + 0.596, + 0.111 + ], + "angle": 0, + "content": "Diversity Across SFT \\([T = 0.8]\\)" + }, + { + "type": "image", + "bbox": [ + 0.163, + 0.121, + 0.344, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.354, + 0.12, + 0.538, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.545, + 0.113, + 0.725, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.729, + 0.112, + 0.83, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.271, + 0.885, + 0.304 + ], + "angle": 0, + "content": "Figure 4: Diversity Collapse The answer, semantic, and operation diversity of Gemma-2-2B reasoning traces across GSM8k test examples. Colors map to different SFT checkpoints." + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.324, + 0.885, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.498, + 0.884, + 0.56 + ], + "angle": 0, + "content": "Figure 5: Pass@k for SFT and RL of Qwen-2.5-0.5B on GSM8K. The purple solid line measures Pass@K across SFT steps, while the dashed lines correspond to further training different checkpoints by Proximal Policy Optimization (PPO). While Pass@1 continues to improve, Pass@k for larger K can decrease even with RL." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.591, + 0.884, + 0.655 + ], + "angle": 0, + "content": "As shown in Figure 4, we observe a stark trend where longer SFT on Gemma-2-2B incrementally suffers from clear diversity collapse across all diversity metrics. Specifically, the model places most of its probability mass not only on one particular guess, but on a single reasoning trace, as evidenced by the reduced semantic and operation diversity." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.669, + 0.72, + 0.687 + ], + "angle": 0, + "content": "5.1 Theoretical Discussion of Diversity Collapse During SFT and RL" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.7, + 0.884, + 0.748 + ], + "angle": 0, + "content": "We assess theoretically why diversity collapse tends to arise during SFT and RL training. Our analysis reveals that while SFT and RL operate on different principles, they share common pathways that lead to reduced generation diversity when optimizing for accuracy." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.782, + 0.885, + 0.919 + ], + "angle": 0, + "content": "Diversity Collapse during SFT Overparameterized models are well-known to exhibit overconfidence in their predictions, an effect that has been studied extensively in classification (Guo et al., 2017). In particular, the model's confidence towards the most likely class \\( P(\\hat{y} = k_{\\max} \\mid x) \\) is often much higher than the model's accuracy. In binary classification with linear models \\( f(x) = \\sigma(\\langle \\boldsymbol{w}, \\boldsymbol{x} \\rangle) \\) and linearly separable training data, gradient descent provably drives the norm of the weights to infinity, causing probabilities to collapse to 0 or 1 (Soudry et al., 2018). We demonstrate this in linear models in Appendix A. 
A similar phenomenon likely arises in large reasoning models, which may also be prone to overfitting during SFT, ultimately leading to overly confident solutions in spite of limited coverage over the space of traces (Cobbe et al., 2021)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.504, + 0.954 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.097, + 0.885, + 0.175 + ], + "angle": 0, + "content": "Diversity Collapse during RL We further prove why applying reinforcement learning to a low-diversity policy yields suboptimal results—and sometimes even exacerbates diversity collapse—in a discrete bandit setting (see Figure 5). In this scenario, we assume there exist \\( K \\) equally good arms, corresponding to a set of successful strategies, and one bad arm that the policy should learn to avoid. We show two key results in this setting:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.195, + 0.887, + 0.288 + ], + "angle": 0, + "content": "1. Implicit Collapse of Policy Diversity without KL Regularization. Our analysis demonstrates that when standard reinforcement learning algorithms—REINFORCE and GRPO—are applied without KL regularization, the training dynamics inevitably lead to a collapse in output diversity. Although multiple arms (actions) are equally optimal, the updates become self-enforcing as training progresses. Once one of the good arms is randomly reinforced, its probability increases at the expense of the others, ultimately driving the policy to converge on a single-arm strategy (Theorem C.1)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.301, + 0.887, + 0.395 + ], + "angle": 0, + "content": "2. Diversity Does Not Increase with KL Regularization. 
When KL regularization is incorporated to constrain the divergence from the initial policy in REINFORCE, the final policy no longer collapses into a single-arm strategy. However, the diversity of the converged policy cannot exceed the initial diversity. Concretely, we show that the probability distribution over the good arms remains proportional to the initial distribution when the RL algorithm converges (Theorem C.8). This explains why initializing with a diverse policy is critical for the generalization of reinforcement learning." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.195, + 0.887, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.414, + 0.518, + 0.433 + ], + "angle": 0, + "content": "6 Bias-Variance Tradeoff of Pass@K" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.457, + 0.886, + 0.52 + ], + "angle": 0, + "content": "So far, we saw a mismatch in growth of Pass@1 and Pass@K during SFT and alluded to the impact of diversity collapse to Pass@K. We now formalize the relationship between Pass@1, Pass@K, and diversity collapse. Notably, we show that the upper bound of expected Pass@K over the test distribution can be decomposed into bias and variance quantities." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.538, + 0.655, + 0.556 + ], + "angle": 0, + "content": "6.1 Diversity Collapse leads to Bimodal Pass@1 Distribution" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.571, + 0.886, + 0.633 + ], + "angle": 0, + "content": "Consider the expected \\(\\mathrm{Pass}@\\mathrm{K}\\) over the entire test distribution \\(x, y \\sim \\mathcal{D}\\). By Jensen's inequality, we can derive a straightforward upper bound of expected \\(\\mathrm{Pass}@\\mathrm{K}\\) that decomposes into the bias and variance of \\(1 - \\rho_x\\) (See proof in Appendix B). 
Note that the upper bound falls monotonically with larger bias and variance:" + }, + { + "type": "equation", + "bbox": [ + 0.152, + 0.662, + 0.753, + 0.702 + ], + "angle": 0, + "content": "\\[\n\\textbf{Proposition 6.1.} \\mathbb {E} _ {x, y \\sim \\mathcal {D}} [ \\operatorname{Pass}@\\mathrm{K} (x) ] \\leq 1 - ((\\underbrace {\\mathbb {E} _ {x , y \\sim \\mathcal {D}} [ 1 - \\rho_ {x} ]} _ {\\text{Bias}}) ^ {2} + \\underbrace {\\operatorname{Var} (\\rho_ {x})} _ {\\text{Variance}}) ^ {k / 2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.729, + 0.885, + 0.807 + ], + "angle": 0, + "content": "In Figure 6b, we plot the distribution of error \\(1 - \\rho_{x}\\), estimated using 100 sampled traces, over GSM8K test examples. We notice two trends with longer SFT. First, bias decreases, i.e., the expected error shifts towards 0. However, the distribution becomes increasingly bimodal with the densities converging towards the two extremes 0 and 1. As a result, the variance increases with longer SFT. This increase in variance directly explains the drop in Pass@k." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.812, + 0.886, + 0.922 + ], + "angle": 0, + "content": "The bimodality of the \\(1 - \\rho_{x}\\) distribution means that the Pass@1 of any test problem is either very high or very low. Interestingly, one explanation for the increased bimodality of the distribution of \\(1 - \\rho_{x}\\) is in fact when models suffer from diversity collapse. In other words, a particular guess tends to be oversampled for each test problem. If the model places high probability on an incorrect guess, Pass@1 is very low. On the other hand, if the model places high probability on the correct guess, Pass@1 is very high. We illustrate this relationship in Figure 6a. All in all, Pass@K can be improved in two ways - either reduce bias by improving Pass@1 or reduce variance by increasing diversity." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.506, + 0.955 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.095, + 0.887, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.489, + 0.254, + 0.51, + 0.269 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.272, + 0.889, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.487, + 0.463, + 0.512, + 0.479 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.49, + 0.889, + 0.553 + ], + "angle": 0, + "content": "Figure 6: Histogram of error \\(1 - \\rho_{x}\\) of Gemma-2-2B SFT checkpoints across GSM8k test. SFT progressively decreases bias but increases variance of error i.e., \\(1 - \\mathrm{Pass}@1\\), across the test distribution, causing Pass@K to fall. Applying WiSE-FT reduces both bias and variance, but temperature scaling trades off decreasing variance with increased bias." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.578, + 0.422, + 0.596 + ], + "angle": 0, + "content": "6.2 WiSE-FT vs. Diverse Decoding" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.607, + 0.888, + 0.791 + ], + "angle": 0, + "content": "While we've proposed inducing diversity by WiSE-FT, another common alternative for inducing diversity is temperature scaling the logits. High temperature smoothens the logits allowing the model to more likely sample low probability tokens. In Figure 1, we see that while high temperatures indeed improve Pass@K, the Pass@K at any SFT timestep notably never reaches the Pass@K of our final WiSE-FT model. 
If temperature scaling also increases diversity, why does WiSE-FT strictly outperform sampling with high temperature? In Figure 6b, we plot the distribution of \\(1 - \\rho_{x}\\) if we sample from the last SFT checkpoint with high temperature \\(T = 1.5\\). As expected, we see that the model reasons more diversely. This smoothens the bimodal peaks and reduces the variance. However, the average accuracy of the model generations also degrades, causing the bias to go back up. We suspect the bias-variance tradeoff is inherent in diversity-inducing decoding approaches. For example, min-p (Nguyen et al., 2024) combines temperature scaling with adaptive thresholding to not sample outlier tokens. However, this additional control is unable to reduce bias (Figure 16). Surprisingly, WiSE-FT uniquely manages to reduce both bias and variance." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.806, + 0.269, + 0.824 + ], + "angle": 0, + "content": "7 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.842, + 0.888, + 0.922 + ], + "angle": 0, + "content": "In this work, we investigated the phenomenon of diversity collapse during the training of reasoning models. Our analysis reveals that standard SFT and RL pipelines can deteriorate in Pass@ \\( K \\) due to the convergence of model generations toward a single reasoning trace. We demonstrated that WiSE-FT, which interpolates between early and late SFT checkpoints, significantly improves both Pass@1 and Pass@ \\( K \\) across multiple math datasets and model scales. 
This is unlike alternative approaches such as temperature scaling or early" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.506, + 0.954 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.097, + 0.888, + 0.143 + ], + "angle": 0, + "content": "stopping, which face an inherent tradeoff. Furthermore, improving on these metrics corresponded with better adaptation to test-time scaling and RL. But other limitations of WiSE-FT may exist at larger scale, which we leave for future work." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.149, + 0.593, + 0.288 + ], + "angle": 0, + "content": "Overall, our work reveals the importance of maintaining diversity in reasoning models. Current decoding strategies (e.g., min-p, nucleus, and top-k) are still unable to fully extract a model's capabilities. We estimate that a significant gap, of tens of percent, remains compared to the optimal decoding strategy for Pass@K, i.e., top-K sampling over the model's marginal answer distribution \\( P(\\hat{y} \\mid x) \\) (see Table 1 and Appendix G). We encourage future works to address downstream limitations more carefully in earlier stages of the training pipeline." + }, + { + "type": "table", + "bbox": [ + 0.627, + 0.166, + 0.865, + 0.248 + ], + "angle": 0, + "content": "
MethodPass@2Pass@4
Nucleus0.570.67
Min-p0.570.67
Top-k0.560.67
Optimal0.760.83
" + }, + { + "type": "table_caption", + "bbox": [ + 0.599, + 0.257, + 0.89, + 0.29 + ], + "angle": 0, + "content": "Table 1: Best Pass@k of Gemma on GSM8k across SFT checkpoints" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.303, + 0.366, + 0.325 + ], + "angle": 0, + "content": "8 Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.339, + 0.886, + 0.417 + ], + "angle": 0, + "content": "We'd like to thank Aviral Kumar, Sean Welleck, Amrith Setlur and Yiding Jiang for insightful discussions about test-time scaling and reinforcement learning. We'd also like to thank Alex Li, Sachin Goyal, and Jacob Springer for their meaningful contribution to our figures and literature review. We gratefully acknowledge support from Apple, Google, Cisco, OpenAI, NSF, Okawa foundation, the AI2050 program at Schmidt Sciences (Grant #G2264481), and Bosch Center for AI." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.431, + 0.236, + 0.451 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.468, + 0.887, + 0.515 + ], + "angle": 0, + "content": "AlphaProof and AlphaGeometry teams. Ai achieves silver-medal standard solving international mathematical olympiad problems, jul 2024. URL https://deepmind.google/discover/blog/ai-solves-imo-problems-at-silver-medal-level/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.524, + 0.887, + 0.557 + ], + "angle": 0, + "content": "Edward Beeching, Lewis Tunstall, and Sasha Rush. Scaling test-time compute with open models, 2024. URL https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.565, + 0.887, + 0.597 + ], + "angle": 0, + "content": "Jeff Bilmes. Submodularity in machine learning and artificial intelligence. arXiv preprint arXiv:2202.00132, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.606, + 0.887, + 0.652 + ], + "angle": 0, + "content": "Feng Chen, Allan Raventos, Nan Cheng, Surya Ganguli, and Shaul Druckmann. Rethinking fine-tuning when scaling test-time compute: Limiting confidence improves mathematical reasoning. arXiv preprint arXiv:2502.07154, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.663, + 0.885, + 0.695 + ], + "angle": 0, + "content": "Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Alphamath almost zero: Process supervision without process, 2024. URL https://arxiv.org/abs/2405.03553." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.703, + 0.887, + 0.751 + ], + "angle": 0, + "content": "Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for best-of-n sampling in large language models. arXiv preprint arXiv:2412.15287, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.76, + 0.887, + 0.807 + ], + "angle": 0, + "content": "Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for best-of-n sampling in large language models, 2024b. URL https://arxiv.org/abs/2412.15287." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.816, + 0.887, + 0.864 + ], + "angle": 0, + "content": "Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V. Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training, 2025. URL https://arxiv.org/abs/2501.17161." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.872, + 0.887, + 0.92 + ], + "angle": 0, + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems, 2021. URL https://arxiv.org/abs/2110.14168." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.468, + 0.887, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.096, + 0.885, + 0.129 + ], + "angle": 0, + "content": "Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q Weinberger. On calibration of modern neural networks. In International conference on machine learning, pp. 1321-1330. PMLR, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.137, + 0.885, + 0.183 + ], + "angle": 0, + "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.192, + 0.885, + 0.237 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset, 2021. URL https://arxiv.org/abs/2103.03874." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.246, + 0.885, + 0.277 + ], + "angle": 0, + "content": "Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. The curious case of neural text degeneration, 2020. 
URL https://arxiv.org/abs/1904.09751." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.285, + 0.885, + 0.332 + ], + "angle": 0, + "content": "Audrey Huang, Adam Block, Dylan J Foster, Dhruv Rohatgi, Cyril Zhang, Max Simchowitz, Jordan T Ash, and Akshay Krishnamurthy. Self-improvement in language models: The sharpening mechanism. arXiv preprint arXiv:2412.01951, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.341, + 0.885, + 0.386 + ], + "angle": 0, + "content": "Yifei Li, Zeqi Lin, Shizhuo Zhang, Qiang Fu, Bei Chen, Jian-Guang Lou, and Weizhu Chen. Making large language models better reasoners with step-aware verifier, 2023. URL https://arxiv.org/abs/2206.02336." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.395, + 0.885, + 0.442 + ], + "angle": 0, + "content": "Ziniu Li, Congliang Chen, Tian Xu, Zeyu Qin, Jiancong Xiao, Zhi-Quan Luo, and Ruoyu Sun. Preserving diversity in supervised fine-tuning of large language models. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=NQEe7B7bSw." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.45, + 0.885, + 0.482 + ], + "angle": 0, + "content": "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. arXiv preprint arXiv:2305.20050, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.49, + 0.885, + 0.535 + ], + "angle": 0, + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.544, + 0.885, + 0.589 + ], + "angle": 0, + "content": "Minh Nguyen, Andrew Baker, Clement Neo, Allen Roush, Andreas Kirsch, and Ravid Shwartz-Ziv. 
Turning up the heat: Min-p sampling for creative and coherent llm outputs, 2024. URL https://arxiv.org/abs/2407.01082." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.599, + 0.885, + 0.631 + ], + "angle": 0, + "content": "Theo X. Olausson, Jeevana Priya Inala, Chenglong Wang, Jianfeng Gao, and Armando Solar-Lezama. Is self-repair a silver bullet for code generation?, 2024. URL https://arxiv.org/abs/2306.09896." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.639, + 0.885, + 0.715 + ], + "angle": 0, + "content": "Pier Giuseppe Sessa, Robert Dadashi, Léonard Hussenot, Johan Ferret, Nino Vieillard, Alexandre Ramé, Bobak Shariari, Sarah Perrin, Abe Friesen, Geoffrey Cideron, Sertan Girgin, Piotr Stanczyk, Andrea Michi, Danila Sinopalnikov, Sabela Ramos, Amélie Héliou, Aliaksei Severyn, Matt Hoffman, Nikola Momchev, and Olivier Bachem. Bond: Aligning llms with best-of-n distillation, 2024. URL https://arxiv.org/abs/2407.14622." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.723, + 0.885, + 0.77 + ], + "angle": 0, + "content": "Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for llm reasoning, 2024. URL https://arxiv.org/abs/2410.08146." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.778, + 0.885, + 0.824 + ], + "angle": 0, + "content": "Louis Shao, Stephan Gouws, Denny Britz, Anna Goldie, Brian Strope, and Ray Kurzweil. Generating high-quality and informative conversation responses with sequence-to-sequence models. arXiv preprint arXiv:1701.03185, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.833, + 0.885, + 0.865 + ], + "angle": 0, + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.872, + 0.885, + 0.918 + ], + "angle": 0, + "content": "Yuda Song, Hanlin Zhang, Carson Eisenach, Sham Kakade, Dean Foster, and Udaya Ghai. Mind the gap: Examining the self-improvement capabilities of large language models. arXiv preprint arXiv:2412.02674, 2024." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.096, + 0.885, + 0.918 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.944, + 0.508, + 0.955 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.096, + 0.885, + 0.129 + ], + "angle": 0, + "content": "Daniel Soudry, Elad Hoffer, Mor Shpigel Nacson, Suriya Gunasekar, and Nathan Srebro. The implicit bias of gradient descent on separable data. Journal of Machine Learning Research, 19(70):1-57, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.137, + 0.885, + 0.183 + ], + "angle": 0, + "content": "Ashwin K Vijayakumar, Michael Cogswell, Ramprasath R. Selvaraju, Qing Sun, Stefan Lee, David Crandall, and Dhruv Batra. Diverse beam search: Decoding diverse solutions from neural sequence models, 2018. URL https://arxiv.org/abs/1610.02424." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.192, + 0.885, + 0.239 + ], + "angle": 0, + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models, 2023. URL https://arxiv.org/abs/2203.11171." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.248, + 0.885, + 0.295 + ], + "angle": 0, + "content": "Mitchell Wortsman, Gabriel Ilharco, Jong Wook Kim, Mike Li, Simon Kornblith, Rebecca Roelofs, Raphael Gontijo-Lopes, Hannaneh Hajishirzi, Ali Farhadi, Hongseok Namkoong, and Ludwig Schmidt. Robust fine-tuning of zero-shot models, 2022. URL https://arxiv.org/abs/2109.01903." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.303, + 0.885, + 0.35 + ], + "angle": 0, + "content": "Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. arXiv preprint arXiv:2408.00724, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.359, + 0.885, + 0.391 + ], + "angle": 0, + "content": "Wei Xiong, Hanning Zhang, Nan Jiang, and Tong Zhang. An implementation of generative prm. https://github.com/RLHFlow/RLHF-Reward-Modeling, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.399, + 0.885, + 0.431 + ], + "angle": 0, + "content": "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. Demystifying long chain-of-thought reasoning in llms, 2025. URL https://arxiv.org/abs/2502.03373." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.439, + 0.885, + 0.485 + ], + "angle": 0, + "content": "Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. arXiv preprint arXiv:2309.12284, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.494, + 0.885, + 0.527 + ], + "angle": 0, + "content": "Dun Zhang, Jiacheng Li, Ziyang Zeng, and Fulong Wang. Jasper and stella: distillation of sota embedding models. arXiv preprint arXiv:2412.19048, 2024a." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.534, + 0.885, + 0.567 + ], + "angle": 0, + "content": "Yiming Zhang, Avi Schwarzschild, Nicholas Carlini, Zico Kolter, and Daphne Ippolito. Forcing diffuse distributions out of language models, 2024b. URL https://arxiv.org/abs/2404.10859." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.096, + 0.885, + 0.567 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.943, + 0.508, + 0.955 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.435, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.096, + 0.458, + 0.117 + ], + "angle": 0, + "content": "A SFT in Binary Classification" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.139, + 0.885, + 0.171 + ], + "angle": 0, + "content": "Data and Model Setup We train a linear classifier \\( f(\\pmb{x}) = \\langle \\pmb{w}, \\pmb{x} \\rangle \\) from random initialization over a binary Gaussian mixture distribution:" + }, + { + "type": "equation", + "bbox": [ + 0.424, + 0.186, + 0.884, + 0.206 + ], + "angle": 0, + "content": "\\[\nx \\mid y \\sim \\mathcal {N} (y \\boldsymbol {\\mu}, I ^ {d \\times d}) \\tag {3}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.413, + 0.209, + 0.884, + 0.226 + ], + "angle": 0, + "content": "\\[\ny \\in \\{1, - 1 \\} \\text{ uniformly} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.242, + 0.886, + 0.292 + ], + "angle": 0, + "content": "Given a model, we sample predictions, namely \\(\\hat{y} = 1\\) with probability \\(\\sigma (\\langle \\pmb {w},\\pmb {x}\\rangle) = (1 + \\exp (-\\langle \\pmb {w},\\pmb {x}\\rangle))^{-1}\\), or \\(\\hat{y} = 0\\). Then, per-example Pass@1 is equal to \\(\\rho_{x} = \\sigma (y\\cdot \\langle \\pmb {w},\\pmb {x}\\rangle)\\). 
Similarly, the expected Pass@k is equal to \\(1 - (1 - \\rho_{x})^{k}\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.297, + 0.681, + 0.41 + ], + "angle": 0, + "content": "In our experiment, we train an overparametrized linear classifier over binary Gaussian data mixture \\( x \\mid y \\sim \\mathcal{N}(y \\cdot \\frac{1}{\\sqrt{d}} \\mathbf{1}, \\frac{1}{2} I) \\) where \\( y = \\{-1, 1\\} \\) and \\( d = 1000 \\). We then evaluate \\( \\rho_x \\) of 400 test samples. As training progresses, the distribution of \\( \\rho_x \\) over the test data becomes bimodal due to the norm of \\( w \\) monotonically increasing once it separates the training examples. Similarly, we observe that this leads to a drop in Pass@k while Pass@1 continues to improve." + }, + { + "type": "image", + "bbox": [ + 0.694, + 0.29, + 0.879, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.695, + 0.416, + 0.877, + 0.433 + ], + "angle": 0, + "content": "Figure 7: Weight Norm" + }, + { + "type": "image", + "bbox": [ + 0.132, + 0.455, + 0.273, + 0.562 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.281, + 0.456, + 0.422, + 0.561 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.431, + 0.456, + 0.572, + 0.561 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.576, + 0.456, + 0.72, + 0.561 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.729, + 0.456, + 0.871, + 0.561 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.283, + 0.58, + 0.715, + 0.597 + ], + "angle": 0, + "content": "Figure 8: Pass@k across Training in Binary Classification" + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.643, + 0.273, + 0.749 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.277, + 0.642, + 0.421, + 0.748 + ], + "angle": 0, + "content": null + }, + { + "type": 
"image", + "bbox": [ + 0.427, + 0.642, + 0.572, + 0.748 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.579, + 0.642, + 0.723, + 0.748 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.729, + 0.642, + 0.875, + 0.749 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.321, + 0.767, + 0.677, + 0.785 + ], + "angle": 0, + "content": "Figure 9: Histogram of \\(\\rho_{x}\\) across training steps" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.821, + 0.345, + 0.842 + ], + "angle": 0, + "content": "B Expected Pass@k" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.863, + 0.249, + 0.879 + ], + "angle": 0, + "content": "Proposition B.1." + }, + { + "type": "equation", + "bbox": [ + 0.265, + 0.895, + 0.733, + 0.917 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} _ {x, y \\sim \\mathcal {D}} \\left[ \\mathrm {P a s s @ K} (x) \\right] \\leq 1 - \\left(\\left(\\mathbb {E} _ {x, y \\sim \\mathcal {D}} [ 1 - \\rho_ {x} ]\\right) ^ {2} + \\mathrm {V a r} (\\rho_ {x})\\right) ^ {k / 2}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.097, + 0.163, + 0.113 + ], + "angle": 0, + "content": "Proof." 
+ }, + { + "type": "equation", + "bbox": [ + 0.246, + 0.121, + 0.885, + 0.235 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} \\left[ (1 - \\rho_ {x}) ^ {k} \\right] \\geq \\mathbb {E} \\left[ (1 - \\rho_ {X}) ^ {2} \\right] ^ {k / 2} (5) \\\\ = \\left(1 - 2 \\mathbb {E} \\left[ \\rho_ {x} \\right] + \\mathbb {E} \\left[ \\rho_ {x} ^ {2} \\right]\\right) ^ {k / 2} (6) \\\\ = \\left(\\left(1 - 2 \\mathbb {E} [ \\rho_ {x} ] + \\mathbb {E} [ \\rho_ {x} ] ^ {2}\\right) + \\left(\\mathbb {E} \\left[ \\rho_ {x} ^ {2} \\right] - \\mathbb {E} [ \\rho_ {x} ] ^ {2}\\right)\\right) ^ {k / 2} (7) \\\\ = \\left(\\left(1 - \\mathbb {E} [ \\rho_ {x} ]\\right) ^ {2} + \\operatorname {V a r} (\\rho_ {x})\\right) ^ {k / 2} (8) \\\\ \\end{array}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.866, + 0.242, + 0.884, + 0.255 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.431, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.096, + 0.275, + 0.117 + ], + "angle": 0, + "content": "C RL Theory" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.133, + 0.241, + 0.149 + ], + "angle": 0, + "content": "C.1 Overview" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.161, + 0.884, + 0.194 + ], + "angle": 0, + "content": "We will prove that in a discrete bandit setting with \\( K \\) equally good arms that is the best arm, both REINFORCE and GRPO without KL regularization will eventually collapse into a single-arm strategy." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.199, + 0.885, + 0.247 + ], + "angle": 0, + "content": "We will further prove that, with KL regularization with respect to the initial policy, the converged policy of REINFORCE have the same action distribution as the initial policy when constrained on the set of best arms. Therefore, diversity within good actions will not increase through REINFORCE training." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.259, + 0.336, + 0.278 + ], + "angle": 0, + "content": "C.2 Notations and Setup" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.289, + 0.886, + 0.334 + ], + "angle": 0, + "content": "Formally we consider the following setting. We consider a \\( K + 1 \\)-armed bandit, with arms \\( \\{1,2,\\dots ,K + 1\\} \\). Arms \\( 1,\\ldots ,K \\) are \"good,\" each yielding reward 1, and the other arm is \"bad,\" yielding reward 0. We use a softmax parameterization:" + }, + { + "type": "equation", + "bbox": [ + 0.353, + 0.334, + 0.641, + 0.375 + ], + "angle": 0, + "content": "\\[\np _ {i} = \\frac {e ^ {\\theta_ {i}}}{\\sum_ {j = 1} ^ {K + 1} e ^ {\\theta_ {j}}}, \\quad i = 1, \\dots , K + 1.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.379, + 0.724, + 0.399 + ], + "angle": 0, + "content": "to denote the action distribution. We will use \\(\\theta_i^{(t)}\\) to denote the parameter at step \\(t\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.403, + 0.884, + 0.435 + ], + "angle": 0, + "content": "It is standard to consider using the KL divergence between the current policy with a reference policy (which we set as \\( p_0 \\) here) as a regularization term." 
+ }, + { + "type": "equation", + "bbox": [ + 0.374, + 0.438, + 0.621, + 0.484 + ], + "angle": 0, + "content": "\\[\n\\mathrm {K L} (p ^ {(t)} | p ^ {(0)}) = \\sum_ {i = 1} ^ {K + 1} p _ {i} ^ {(t)} \\log \\frac {p _ {i} ^ {(t)}}{p _ {i} ^ {(0)}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.493, + 0.652, + 0.51 + ], + "angle": 0, + "content": "For REINFORCE, we will consider the following training setup. At step \\( t \\):" + }, + { + "type": "text", + "bbox": [ + 0.151, + 0.52, + 0.761, + 0.542 + ], + "angle": 0, + "content": "1. We sample an arm \\( I_{t} \\) according to \\( p(\\cdot) = (p_1^{(t)},\\dots ,p_{K + 1}^{(t)}) \\) and receive reward \\( r_t \\)" + }, + { + "type": "text", + "bbox": [ + 0.151, + 0.543, + 0.416, + 0.559 + ], + "angle": 0, + "content": "2. We update using policy gradient." + }, + { + "type": "list", + "bbox": [ + 0.151, + 0.52, + 0.761, + 0.559 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.23, + 0.563, + 0.824, + 0.586 + ], + "angle": 0, + "content": "\\[\n\\theta_ {i} ^ {(t + 1)} = \\theta_ {i} ^ {(t)} + \\eta r _ {t} \\nabla_ {\\theta_ {i}} (\\log p _ {I _ {t}} ^ {(t)}) - \\eta \\beta \\nabla_ {\\theta_ {i}} \\mathrm {K L} (p ^ {(t)} | p ^ {(0)}), i = 1, \\dots , K + 1,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.587, + 0.885, + 0.618 + ], + "angle": 0, + "content": "where \\(\\eta > 0\\) is the step size and \\(\\beta\\) is the hyperparameter controlling the strength of KL regularization." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.629, + 0.884, + 0.661 + ], + "angle": 0, + "content": "For GRPO, we will consider the following simplified training setup. This is equivalent to the empirical version of GRPO with online sampling." + }, + { + "type": "text", + "bbox": [ + 0.151, + 0.67, + 0.837, + 0.692 + ], + "angle": 0, + "content": "1. Sample \\( G \\) arms \\( \\{I_t^{(1)},\\dots ,I_t^{(G)}\\} \\) i.i.d. 
from the current policy \\( p(\\cdot) \\) and receive rewards \\( r_t^{(g)} \\)." + }, + { + "type": "text", + "bbox": [ + 0.152, + 0.694, + 0.245, + 0.71 + ], + "angle": 0, + "content": "2. Compute" + }, + { + "type": "list", + "bbox": [ + 0.151, + 0.67, + 0.837, + 0.71 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.345, + 0.711, + 0.709, + 0.757 + ], + "angle": 0, + "content": "\\[\n\\mu_ {t} = \\frac {1}{G} \\sum_ {g = 1} ^ {G} r _ {t} ^ {(g)}, \\quad \\sigma_ {t} = \\sqrt {\\frac {1}{G} \\sum_ {g = 1} ^ {G} \\left(r _ {t} ^ {(g)} - \\mu_ {t}\\right) ^ {2}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.758, + 0.446, + 0.775 + ], + "angle": 0, + "content": "and define the normalized advantage" + }, + { + "type": "equation", + "bbox": [ + 0.412, + 0.778, + 0.641, + 0.835 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{r} \\tilde {r} _ {t} ^ {(g)} = \\left\\{ \\begin{array}{l l} \\frac {r _ {t} ^ {(g)} - \\mu_ {t}}{\\sigma_ {t}}, & \\sigma_ {t} \\neq 0, \\\\ 0, & \\sigma_ {t} = 0. \\end{array} \\right. \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.837, + 0.42, + 0.854 + ], + "angle": 0, + "content": "We will skip the update if \\(\\sigma_t = 0\\)." + }, + { + "type": "text", + "bbox": [ + 0.151, + 0.857, + 0.392, + 0.873 + ], + "angle": 0, + "content": "3. Update each parameter \\(\\theta_{i}\\) via" + }, + { + "type": "equation", + "bbox": [ + 0.221, + 0.878, + 0.832, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\theta_ {i} \\gets \\theta_ {i} + \\frac {\\eta}{G} \\sum_ {g = 1} ^ {G} \\widehat {r} _ {t} ^ {(g)} \\nabla_ {\\theta_ {i}} (\\log p _ {I _ {t} ^ {(g)}} ^ {(t)}) - \\eta \\beta \\nabla_ {\\theta_ {i}} \\mathrm {K L} (p ^ {(t)} | p ^ {(0)}). 
i = 1, \\ldots , K + 1,\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.943, + 0.508, + 0.955 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.431, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.096, + 0.633, + 0.116 + ], + "angle": 0, + "content": "C.3 Implicit Diversity Collapse without KL regularization" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.126, + 0.886, + 0.159 + ], + "angle": 0, + "content": "Theorem C.1 (Collapse to Deterministic Policy). Under REINFORCE or GRPO updates without KL regularization \\((\\beta_0 = 0)\\), given a sufficient small \\(\\eta\\), with probability 1:" + }, + { + "type": "equation", + "bbox": [ + 0.415, + 0.165, + 0.581, + 0.197 + ], + "angle": 0, + "content": "\\[\n\\limsup_{t\\to \\infty}\\max_{i\\in [K]}p_{i}^{(t)} = 1.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.202, + 0.586, + 0.22 + ], + "angle": 0, + "content": "Thus, the policy collapses to a single-arm strategy during training." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.234, + 0.326, + 0.252 + ], + "angle": 0, + "content": "Proof. The proof is two-fold." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.257, + 0.678, + 0.275 + ], + "angle": 0, + "content": "Using Lemma C.3 and C.4, we can show that bad arm probability diminishes," + }, + { + "type": "equation", + "bbox": [ + 0.44, + 0.281, + 0.558, + 0.309 + ], + "angle": 0, + "content": "\\[\n\\lim _ {t \\to \\infty} p _ {K + 1} ^ {(t)} = 0\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.322, + 0.581, + 0.34 + ], + "angle": 0, + "content": "We will then define a property named Self-enforcing Stochastic" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.345, + 0.886, + 0.379 + ], + "angle": 0, + "content": "Definition C.2 (Self-enforcing Stochastic Policy Update Rule). 
We define three properties of a policy update rule that will lead to diversity collapse:" + }, + { + "type": "text", + "bbox": [ + 0.153, + 0.393, + 0.886, + 0.445 + ], + "angle": 0, + "content": "1. The policy update takes the form of \\(\\sum_{k=1}^{B} A_k \\nabla \\log p_{i_k}(\\theta)\\) where \\(i_k\\) is the \\(k\\)-th sampled arm in the batch and \\(A_k\\) is a function determined by (i) the sum of rewards \\(\\sum_{k=1}^{B} r_{i_k}\\) within the batch; (ii) the reward \\(r_{i_k}\\); and (iii) the batch size \\(B\\)." + }, + { + "type": "text", + "bbox": [ + 0.152, + 0.453, + 0.887, + 0.496 + ], + "angle": 0, + "content": "2. A policy update rule is said to be self-enforcing, if \\(\\mathbb{E}[\\theta_i^{(t + 1)} - \\theta_i^{(t)}]\\) is monotone in \\(\\theta_{i}^{(t)}\\) for all \\(i\\in [K]\\) and \\(t\\). Further, \\(\\mathbb{E}[\\theta_i^{(t + 1)} - \\theta_i^{(t)}]\\) is non-positive if \\(i\\geq K + 1\\) and is non-negative if \\(i\\leq K\\)." + }, + { + "type": "text", + "bbox": [ + 0.152, + 0.502, + 0.886, + 0.577 + ], + "angle": 0, + "content": "3. 
A policy update rule is said to be self-enforcing stochastic if it is self-enforcing and there exists constants \\( C_1, C_2 > 0 \\) such that for any \\( \\epsilon > 0 \\), whenever the current policy satisfies \\( \\max_{i \\in [K]} p_i^{(t)} \\in [1/2K, 1 - \\epsilon] \\) (i.e., no single good arm dominates), for \\( i^* = \\arg \\max_{i \\in [K]} p_i^{(t)} \\) the conditional second moment of the parameter updates for every arm \\( i \\in [K + 1] \\) and \\( i \\neq i^* \\) satisfies:" + }, + { + "type": "list", + "bbox": [ + 0.152, + 0.393, + 0.887, + 0.577 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.321, + 0.583, + 0.733, + 0.618 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} \\left[ \\left(\\left(\\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)}\\right) - \\left(\\theta_ {i ^ {*}} ^ {(t + 1)} - \\theta_ {i ^ {*}} ^ {(t)}\\right)\\right) ^ {2} \\mid \\theta^ {(t)} \\right] \\geq C _ {1} \\epsilon^ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.624, + 0.206, + 0.638 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.454, + 0.634, + 0.603, + 0.657 + ], + "angle": 0, + "content": "\\[\n| \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} | < C _ {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.67, + 0.883, + 0.702 + ], + "angle": 0, + "content": "Lemma C.5 shows that for any self-enforcing stochastic policy update rule, the final policy collapses into a single-arm policy." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.707, + 0.886, + 0.74 + ], + "angle": 0, + "content": "Using Lemma C.6 and C.7, we can show that REINFORCE and GRPO are self-enforcing stochastic policy update rules when bad arm probability is lower than \\(1 / 2\\). The proof is then complete." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.747, + 0.886, + 0.786 + ], + "angle": 0, + "content": "Lemma C.3 (Bad Arm Probability Diminishes Using REINFORCE). 
Under the REINFORCE algorithm without KL regularization \\((\\beta = 0)\\), \\(\\lim_{t\\to \\infty}p_{K + 1}^{(t)} = 0\\) almost surely." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.798, + 0.549, + 0.815 + ], + "angle": 0, + "content": "Proof. We can first simplify the REINFORCE update rule to" + }, + { + "type": "equation", + "bbox": [ + 0.288, + 0.822, + 0.709, + 0.845 + ], + "angle": 0, + "content": "\\[\n\\theta_ {i} ^ {(t + 1)} = \\theta_ {i} ^ {(t)} + \\eta r _ {t} (\\mathbf {1} (I _ {t} = i) - p _ {i} ^ {(t)}), \\quad i = 1, \\dots , K + 1.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.86, + 0.543, + 0.881 + ], + "angle": 0, + "content": "Noted that \\(\\sum_{i}\\theta_{i}^{(t)}\\) will not change with \\(t\\), WLOG, assume" + }, + { + "type": "equation", + "bbox": [ + 0.451, + 0.888, + 0.547, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i} \\theta_ {i} ^ {(t)} = 0.\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.944, + 0.508, + 0.955 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.431, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.097, + 0.727, + 0.114 + ], + "angle": 0, + "content": "Because \\(r_{K + 1} = 0\\), we can then assume without loss of generality, for all \\(t\\), \\(I_t \\leq K\\)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.12, + 0.289, + 0.136 + ], + "angle": 0, + "content": "This then suggests that" + }, + { + "type": "equation", + "bbox": [ + 0.406, + 0.137, + 0.591, + 0.161 + ], + "angle": 0, + "content": "\\[\n\\theta_ {K + 1} ^ {(t + 1)} = \\theta_ {K + 1} ^ {(t)} - \\eta p _ {K + 1} ^ {(t)}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.166, + 0.295, + 0.182 + ], + "angle": 0, + "content": "monotonically decrease." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.19, + 0.886, + 0.232 + ], + "angle": 0, + "content": "For any \\(\\epsilon\\), if \\(p_{K + 1}^{(t)} > \\epsilon\\) holds for infinite \\(t\\), then there exists \\(t_0\\), where \\(\\theta_{K + 1}^t < \\log \\epsilon\\) for any \\(t > t_0\\). For any \\(t > t_0\\), there exists \\(i \\in [K]\\), such that \\(\\theta_i^{(t)} > 0\\). This then suggests that" + }, + { + "type": "equation", + "bbox": [ + 0.38, + 0.239, + 0.615, + 0.263 + ], + "angle": 0, + "content": "\\[\np _ {K + 1} ^ {(t)} \\leq \\exp (\\theta_ {K + 1} ^ {(t)} - \\theta_ {i} ^ {(t)}) \\leq \\epsilon .\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.27, + 0.535, + 0.287 + ], + "angle": 0, + "content": "This leads to a contradiction. The proof is then complete." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.301, + 0.884, + 0.339 + ], + "angle": 0, + "content": "Lemma C.4 (Bad Arm Probability Diminishes Using GRPO). Under the GRPO algorithm without KL regularization \\((\\beta = 0), \\lim_{t \\to \\infty} p_{K+1}^{(t)} = 0\\) almost surely." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.361, + 0.883, + 0.397 + ], + "angle": 0, + "content": "Proof. For GRPO, we can show that \\(\\tilde{r}_t^{(g)}\\) is negative iff \\(I_t^{(g)} = K + 1\\). Therefore, we can show that \\(\\theta_{K+1}^{(t)}\\) monotonically decreases, similar to the case in REINFORCE." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.404, + 0.884, + 0.461 + ], + "angle": 0, + "content": "If \\( p_{K+1}^{(t)} > \\epsilon \\) holds for some \\( t \\), one can prove that \\( \\theta_{K+1}^{(t)} \\) will decrease by a constant depending on \\( \\epsilon \\) in expectation. Therefore, following the same line as in C.3, we can prove that \\( \\lim_{t \\to \\infty} p_{K+1}^{(t)} = 0 \\) almost surely." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.473, + 0.886, + 0.525 + ], + "angle": 0, + "content": "Lemma C.5 (Collapse Happens for All Self-enforcing Stochastic Policy Update Rule). Consider a policy update process that is self-enforcing stochastic (Definition C.2), then \\(\\lim \\sup_{t\\to \\infty}\\max_{i\\in [K]}p_i^{(t)} = 1\\) almost surely." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.546, + 0.886, + 0.579 + ], + "angle": 0, + "content": "Proof. We will inductively prove that for different \\(K\\) the following induction hypotheses, for any \\(\\epsilon, \\delta > 0\\), there exists \\(T_{\\epsilon, \\delta, K} > 0\\)," + }, + { + "type": "equation", + "bbox": [ + 0.37, + 0.605, + 0.626, + 0.636 + ], + "angle": 0, + "content": "\\[\n\\Pr \\left(\\max _ {t < T _ {\\epsilon , \\delta , K}} \\max _ {i \\in [ K ]} p _ {i} ^ {(t)} < 1 - \\epsilon\\right) < \\delta .\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.652, + 0.411, + 0.668 + ], + "angle": 0, + "content": "We first consider the case where \\( K = 2 \\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.675, + 0.321, + 0.691 + ], + "angle": 0, + "content": "Consider the stopping time," + }, + { + "type": "equation", + "bbox": [ + 0.384, + 0.701, + 0.614, + 0.732 + ], + "angle": 0, + "content": "\\[\n\\tau_ {\\epsilon} = \\arg \\min _ {t} \\max _ {i \\in [ K ]} p _ {i} ^ {(t)} > 1 - \\epsilon\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.748, + 0.545, + 0.768 + ], + "angle": 0, + "content": "For any \\(\\mathcal{I} = \\{1,2\\}\\), define \\(\\Delta_{\\mathcal{I}}^{t} = \\max_{j\\in [K]}\\theta_{j}^{t} - \\min_{j\\in \\mathcal{I}}\\theta_{i}^{t}\\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.774, + 0.884, + 0.811 + ], + "angle": 0, + "content": "Assume \\(\\theta_{i*}^t = \\max_{j\\in [K]}\\theta_j^t\\), because \\(|\\mathcal{I}|\\geq 2\\), there exists \\(i\\neq i^{*}\\), \\(\\min_{j\\in \\mathcal{I}}\\theta_i^t >0\\). 
We will show three properties of \\(\\Delta_I^t\\)" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.818, + 0.749, + 0.838 + ], + "angle": 0, + "content": "First \\(\\Delta_{\\mathcal{I}}^{(t)}\\) is a submartingale defined on the filtration of the distribution of \\(\\theta^{(t)}\\) because" + }, + { + "type": "equation", + "bbox": [ + 0.265, + 0.865, + 0.731, + 0.888 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\Delta_ {\\mathcal {I}} ^ {(t)} | \\theta_ {t} ] - \\Delta_ {\\mathcal {I}} ^ {(t - 1)} > \\mathbb {E} [ (\\theta_ {i ^ {*}} ^ {t + 1} - \\theta_ {i ^ {*}} ^ {t}) - (\\theta_ {i} ^ {t + 1} - \\theta_ {i} ^ {t}) | \\theta_ {t} ] > 0.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.903, + 0.336, + 0.92 + ], + "angle": 0, + "content": "as the policy is self-enforcing." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.943, + 0.508, + 0.955 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.431, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.095, + 0.436, + 0.117 + ], + "angle": 0, + "content": "Further \\(\\Delta_{\\mathcal{I}}^{(t)}\\) has bounded growth of \\(2C_2\\) as" + }, + { + "type": "equation", + "bbox": [ + 0.395, + 0.127, + 0.603, + 0.182 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} | \\max _ {j \\in [ K ]} \\theta_ {j} ^ {t + 1} - \\max _ {j \\in [ K ]} \\theta_ {j} ^ {t} | < C _ {2}. \\\\ \\bigl|\\min_{j\\in \\mathcal{I}}\\theta_{j}^{t + 1} - \\max_{j\\in \\mathcal{I}}\\theta_{j}^{t}\\bigr| < C_{2}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.223, + 0.831, + 0.243 + ], + "angle": 0, + "content": "Furthermore, the second-momentum of \\(\\Delta_{\\mathcal{I}}^{(t)}\\) needs to increase with \\(t\\) by a constant for any \\(t < \\tau_{\\epsilon}\\)." 
+ }, + { + "type": "equation", + "bbox": [ + 0.298, + 0.253, + 0.699, + 0.297 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} \\left[ \\left(\\Delta_ {\\mathcal {I}} ^ {(t + 1)}\\right) ^ {2} \\mid \\theta_ {t} \\right] \\geq \\left(\\Delta_ {\\mathcal {I}} ^ {(t)}\\right) ^ {2} + \\mathbb {E} \\left[ \\left(\\Delta_ {\\mathcal {I}} ^ {(t + 1)} - \\Delta_ {\\mathcal {I}} ^ {(t)}\\right)\\right) ^ {2} \\mid \\theta_ {t} ] \\\\ \\geq \\left(\\Delta_ {I} ^ {(t)}\\right) ^ {2} + C _ {1} \\epsilon^ {2}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.317, + 0.618, + 0.338 + ], + "angle": 0, + "content": "When \\(t < \\tau_{\\epsilon}\\), it holds that \\(\\Delta_{\\mathcal{I}}^{(t)} < \\log \\frac{2}{\\epsilon}\\), otherwise we can prove that" + }, + { + "type": "equation", + "bbox": [ + 0.271, + 0.347, + 0.726, + 0.38 + ], + "angle": 0, + "content": "\\[\n\\max _ {i, j \\in \\{1, 2 \\}} p _ {i} / p _ {j} = \\exp (\\Delta_ {\\mathcal {I}} ^ {(t)}) > \\frac {2 - 2 \\epsilon}{\\epsilon}. \\Rightarrow \\max _ {i \\in \\{1, 2 \\}} p _ {i} > 1 - \\epsilon .\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.389, + 0.633, + 0.406 + ], + "angle": 0, + "content": "This is a contradiction. 
Further, by Martingale inequality, we have that" + }, + { + "type": "equation", + "bbox": [ + 0.311, + 0.415, + 0.687, + 0.443 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\left(\\Delta^ {\\min \\{t, \\tau_ {\\epsilon} \\}}\\right) ^ {2} ] > \\mathbb {E} [ \\left(\\Delta^ {0}\\right) ^ {2} ] + C _ {1} \\epsilon^ {2} \\mathbb {E} [ \\min \\{t, \\tau_ {\\epsilon} \\} ]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.46, + 0.478, + 0.477 + ], + "angle": 0, + "content": "Further, as \\(\\Delta^t\\) has bounded growth, we have that" + }, + { + "type": "equation", + "bbox": [ + 0.367, + 0.486, + 0.632, + 0.517 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} \\left[ \\left(\\Delta^ {\\min \\{t, \\tau_ {\\epsilon} \\}}\\right) ^ {2} \\right] < (\\log \\frac {2}{\\epsilon} + 2 C _ {2}) ^ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.533, + 0.571, + 0.56 + ], + "angle": 0, + "content": "This implies \\(\\mathbb{E}[\\min \\{t,\\tau_{\\epsilon}\\}] < \\frac{(\\log\\frac{2}{\\epsilon} + 2C_2)^2}{C_1\\epsilon^2}\\) for all \\(t\\), this implies" + }, + { + "type": "equation", + "bbox": [ + 0.409, + 0.569, + 0.589, + 0.606 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\tau_ {\\epsilon} ] < \\frac {(\\log \\frac {2}{\\epsilon} + 2 C _ {2}) ^ {2}}{C _ {1} \\epsilon^ {2}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.621, + 0.435, + 0.638 + ], + "angle": 0, + "content": "Further, by Markov inequality, if we choose" + }, + { + "type": "equation", + "bbox": [ + 0.409, + 0.647, + 0.59, + 0.683 + ], + "angle": 0, + "content": "\\[\nT _ {\\epsilon , \\delta , 2} = \\frac {(\\log \\frac {2}{\\epsilon} + 2 C _ {2}) ^ {2}}{C _ {1} \\epsilon^ {2} \\delta}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.692, + 0.155, + 0.706 + ], + "angle": 0, + "content": "then," + }, + { + "type": "equation", + "bbox": [ + 0.393, + 0.715, + 0.605, + 0.751 + ], + "angle": 0, + "content": "\\[\n\\Pr \\left(\\tau_ {\\epsilon} > T _ 
{\\epsilon , \\delta , 2}\\right) < \\frac {\\mathbb {E} \\left[ \\tau_ {\\epsilon} \\right]}{T _ {\\epsilon , \\delta , 2}} < \\delta .\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.767, + 0.381, + 0.784 + ], + "angle": 0, + "content": "This concludes the proof for \\( K = 2 \\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.79, + 0.884, + 0.827 + ], + "angle": 0, + "content": "Now assuming the result holds for \\( K - 1 \\) and consider the case for \\( K \\), First, we choose a small enough constant \\( C_{\\delta ,\\epsilon ,K,N} > 0 \\), such that when \\( p_{K - 1}^{(0)} < C_{\\delta ,\\epsilon ,K,N} \\), the following two random processes are close:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.849, + 0.731, + 0.869 + ], + "angle": 0, + "content": "- Running the algorithm for \\(N\\) steps on the \\(K\\) arms bandit yields \\(\\theta_i^{(t)}, i \\in [K]\\)" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.879, + 0.881, + 0.92 + ], + "angle": 0, + "content": "- Running the algorithm for \\(N\\) steps on a \\(K - 1\\) arms bandit yields \\(\\tilde{\\theta}_i^{(t)}, i \\in [K - 1]\\) with \\(\\tilde{\\theta}_i^{(0)} = \\theta_i^{(0)}, i < K - 1\\) and \\(\\tilde{\\theta}_{K - 1}^{(0)} = \\theta_K(0)\\)" + }, + { + "type": "list", + "bbox": [ + 0.156, + 0.849, + 0.881, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.943, + 0.508, + 0.954 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.431, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.096, + 0.504, + 0.113 + ], + "angle": 0, + "content": "and there exists a joint measure on \\(\\theta\\) and \\(\\tilde{\\theta}\\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.318, + 0.119, + 0.677, + 0.139 + ], + "angle": 0, + "content": "\\[\n\\forall i \\in [ K - 2 ], t < N, \\Pr (| 
p _ {i} ^ {t} - \\tilde {p} _ {i} ^ {t} | > \\epsilon / 2) < \\delta / 6.\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.445, + 0.141, + 0.677, + 0.16 + ], + "angle": 0, + "content": "\\[\n\\operatorname * {P r} (| p _ {K} ^ {t} - \\tilde {p} _ {K - 1} ^ {t} | > \\epsilon / 2) < \\delta / 6.\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.461, + 0.163, + 0.677, + 0.182 + ], + "angle": 0, + "content": "\\[\n\\Pr \\left(\\left| p _ {K} ^ {t} - p _ {K} ^ {0} \\right| > \\epsilon / 2\\right) < \\delta / 6.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.196, + 0.886, + 0.245 + ], + "angle": 0, + "content": "This joint measure is constructed by choosing the corresponding arm for two process at each sampling step as long as the sampled arm is not \\( K \\) and uses the uniform convergence on \\( \\nabla \\log_{\\theta} p_i \\). Now following the same argument at \\( K = 2 \\), we can show that there exists \\( \\tilde{T}_{\\epsilon, \\delta, K} \\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.289, + 0.252, + 0.709, + 0.281 + ], + "angle": 0, + "content": "\\[\n\\operatorname * {P r} (\\exists t < \\tilde {T} _ {\\epsilon , \\delta , K}, \\min _ {t \\in [ K ]} p _ {t} < C _ {\\delta , \\epsilon , K, T _ {\\epsilon / 2, \\delta / 2, K - 1}}) > 1 - \\delta / 2.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.295, + 0.886, + 0.343 + ], + "angle": 0, + "content": "Then we can invoke the induction hypothesis and uses the coupling shown above to show that if we choose \\( T_{\\epsilon, \\delta, K} = \\tilde{T}_{\\epsilon, \\delta, K} + T_{\\epsilon/2, \\delta/2, K-1} \\), then there exists a time step that one arm has probability higher than \\( 1 - \\epsilon \\) with probability at least \\( 1 - \\delta \\)." 
+ }, + { + "type": "image", + "bbox": [ + 0.865, + 0.349, + 0.884, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.373, + 0.886, + 0.411 + ], + "angle": 0, + "content": "Lemma C.6. The REINFORCE algorithm without KL regularization (\\(\\beta = 0\\)) is self-enforcing stochastic (Definition C.2) once \\(p_{K+1}^{(t)} < 1/2\\)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.424, + 0.553, + 0.44 + ], + "angle": 0, + "content": "Proof. The REINFORCE algorithm is self-enforcing because" + }, + { + "type": "equation", + "bbox": [ + 0.343, + 0.447, + 0.653, + 0.485 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} ] = \\eta p _ {i} (r _ {i} - \\sum_ {j \\in [ K + 1 ]} p _ {j} r _ {j}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.5, + 0.178, + 0.515 + ], + "angle": 0, + "content": "Further," + }, + { + "type": "equation", + "bbox": [ + 0.429, + 0.521, + 0.569, + 0.544 + ], + "angle": 0, + "content": "\\[\n| \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} | \\leq 1\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.55, + 0.752, + 0.588 + ], + "angle": 0, + "content": "and if we consider the distribution of \\(\\Delta_{i,i^*,t} = \\frac{\\left(\\theta_i^{(t + 1)} - \\theta_i^{(t)}\\right) - \\left(\\theta_{i^*}^{(t + 1)} - \\theta_{i^*}^{(t)}\\right)}{\\eta}\\), it holds that" + }, + { + "type": "equation", + "bbox": [ + 0.321, + 0.594, + 0.675, + 0.613 + ], + "angle": 0, + "content": "\\[\n\\Delta_ {i, i ^ {*}, t} = r _ {I _ {t}} \\left(\\mathbf {1} (i = I _ {t}) - \\mathbf {1} (i ^ {*} = I _ {t}) - p _ {i} + p _ {i ^ {*}}\\right)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.315, + 0.65, + 0.68, + 0.668 + ], + "angle": 0, + "content": "\\[\n\\Pr \\left(\\Delta_ {i, i ^ {*}, t} = - 1 - p _ {i} + p _ {i} ^ {*}\\right) \\geq \\Pr \\left(I _ {t} = i ^ {*}\\right) = p _ {i ^ {*}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.682, + 
0.192, + 0.696 + ], + "angle": 0, + "content": "Therefore" + }, + { + "type": "equation", + "bbox": [ + 0.372, + 0.703, + 0.622, + 0.761 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} \\left[ \\Delta_ {i, i ^ {*}, t} ^ {2} \\right] \\geq p _ {i ^ {*}} \\left(- 1 - p _ {i} + p _ {i} ^ {*}\\right) ^ {2} \\\\ \\geq p _ {i ^ {*}} (1 - p _ {i ^ {*}}) ^ {2} \\geq \\frac {\\epsilon^ {2}}{2 K}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.767, + 0.56, + 0.784 + ], + "angle": 0, + "content": "This then concludes the proof with \\( C_1 = \\eta / 2K \\) and \\( C_2 = \\eta \\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.79, + 0.886, + 0.829 + ], + "angle": 0, + "content": "Lemma C.7. The GRPO algorithm without KL regularization (\\(\\beta = 0\\)) is self-enforcing stochastic (Definition C.2) once \\(p_{K+1}^{(t)} < 1/2\\)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.842, + 0.505, + 0.858 + ], + "angle": 0, + "content": "Proof. The GRPO algorithm is self-enforcing because" + }, + { + "type": "equation", + "bbox": [ + 0.127, + 0.865, + 0.868, + 0.891 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} ] = \\eta \\mathbb {E} [ \\tilde {r} _ {t} ^ {(g)} (\\mathbf {1} (I _ {t} ^ {(g)} = i) - p _ {i} ^ {(t)}) ] = \\eta \\mathbb {E} [ \\tilde {r} _ {t} ^ {(g)} \\mathbf {1} (I _ {t} ^ {(g)} = i) ] = \\eta \\mathbb {E} _ {\\mu_ {t}} [ \\mathbb {E} [ \\tilde {r} _ {t} ^ {(g)} \\mathbf {1} (I _ {t} ^ {(g)} = i) | \\mu_ {t} ] ].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.901, + 0.729, + 0.92 + ], + "angle": 0, + "content": "Noted that \\(\\mathbb{E}[\\tilde{r}_t^{(g)}\\mathbf{1}(I_t^{(g)} = i)|\\mu_t]\\) is monotonous with \\(p_i\\), hence monotonous with \\(\\theta_{i}\\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.943, + 0.508, + 0.955 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.431, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.097, + 0.174, + 0.111 + ], + "angle": 0, + "content": "Further" + }, + { + "type": "equation", + "bbox": [ + 0.318, + 0.114, + 0.682, + 0.172 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} | \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} | \\leq \\eta \\max _ {g} | \\tilde {r} _ {t} ^ {(g)} (\\mathbf {1} (I _ {t} ^ {(g)} = i) - p _ {i} ^ {(t)}) | \\\\ \\leq \\eta \\max _ {g} | \\tilde {r} _ {t} ^ {(g)} | \\leq \\eta \\sqrt {G}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.184, + 0.564, + 0.199 + ], + "angle": 0, + "content": "Now we only need to lower bound the second momentum of" + }, + { + "type": "equation", + "bbox": [ + 0.339, + 0.203, + 0.658, + 0.248 + ], + "angle": 0, + "content": "\\[\n\\Delta_ {i, i ^ {*}, t} = \\frac {\\left(\\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)}\\right) - \\left(\\theta_ {i ^ {*}} ^ {(t + 1)} - \\theta_ {i ^ {*}} ^ {(t)}\\right)}{\\eta}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.266, + 0.197, + 0.28 + ], + "angle": 0, + "content": "Noted that" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.283, + 0.638, + 0.328 + ], + "angle": 0, + "content": "\\[\n\\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} = \\frac {\\eta}{G} \\sum_ {g = 1} ^ {G} \\tilde {r} _ {t} ^ {(g)} \\mathbf {1} (I _ {t} ^ {(g)} = i).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.34, + 0.207, + 0.354 + ], + "angle": 0, + "content": "It holds that" + }, + { + "type": "equation", + "bbox": [ + 0.26, + 0.357, + 0.737, + 0.406 + ], + "angle": 0, + "content": "\\[\n\\sigma_ {t} = \\sqrt {\\frac {1}{G} \\sum_ {g} (r _ {t} ^ {g} - \\mu) ^ {2}} = \\sqrt {\\frac {1}{G} 
\\sum_ {g} r _ {t} ^ {g} - 2 \\mu r _ {t} ^ {g} + \\mu^ {2}} = \\sqrt {\\mu - \\mu^ {2}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.419, + 0.304, + 0.438 + ], + "angle": 0, + "content": "Therefore when \\(r_t^{(g)} > 0\\)" + }, + { + "type": "equation", + "bbox": [ + 0.305, + 0.443, + 0.692, + 0.485 + ], + "angle": 0, + "content": "\\[\n\\tilde {r} _ {t} ^ {(g)} = \\frac {r _ {t} ^ {(g)} - \\mu_ {t}}{\\sigma_ {t}} = \\frac {1 - \\mu_ {t}}{\\sigma_ {t}} = \\sqrt {\\frac {1 - \\mu_ {t}}{\\mu_ {t}}} \\geq \\sqrt {\\frac {1}{G - 1}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.498, + 0.641, + 0.518 + ], + "angle": 0, + "content": "Because all \\(\\tilde{r}_t^{(g)}\\) are the same when \\(r_t^{(g)} > 0\\), it holds that when \\(i \\in [K]\\)" + }, + { + "type": "equation", + "bbox": [ + 0.293, + 0.523, + 0.705, + 0.626 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\Delta_ {i, i ^ {*}, t} ^ {2} = \\frac {1}{G} \\frac {1 - \\mu_ {t}}{\\mu_ {t}} \\left(\\sum_ {g = 1} ^ {G} {\\bf 1} (I _ {t} ^ {(g)} = i) - {\\bf 1} (I _ {t} ^ {(g)} = i ^ {*})\\right) ^ {2} \\\\ \\geq \\frac {1}{G (G - 1)} \\left(\\sum_ {g = 1} ^ {G} \\mathbf {1} \\left(I _ {t} ^ {(g)} = i\\right) - \\mathbf {1} \\left(I _ {t} ^ {(g)} = i ^ {*}\\right)\\right) ^ {2}. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.637, + 0.245, + 0.653 + ], + "angle": 0, + "content": "This then implies" + }, + { + "type": "equation", + "bbox": [ + 0.228, + 0.657, + 0.765, + 0.714 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\Delta_ {i, i ^ {*}, t} ^ {2} ] \\geq \\frac {1}{G (G - 1)} \\mathbb {E} \\left[ \\left(\\sum_ {g = 1} ^ {G} {\\bf 1} (I _ {t} ^ {(g)} = i) - {\\bf 1} (I _ {t} ^ {(g)} = i ^ {*})\\right) ^ {2} \\Big | \\mu_ {t} \\neq 1, 0 \\right]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.726, + 0.644, + 0.746 + ], + "angle": 0, + "content": "One can without loss of generality assume \\( I_{t}^{(G)} = K + 1 \\) and show that" + }, + { + "type": "equation", + "bbox": [ + 0.243, + 0.751, + 0.755, + 0.842 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} \\left[ \\Delta_ {i, i ^ {*}, t} ^ {2} \\right] \\geq \\frac {1}{G (G - 1)} \\mathbb {E} \\left[ \\left(\\sum_ {g = 1} ^ {G - 1} \\mathbf {1} \\left(I _ {t} ^ {(g)} = i\\right) - \\mathbf {1} \\left(I _ {t} ^ {(g)} = i ^ {*}\\right)\\right) ^ {2} \\right] \\\\ \\geq \\frac {1}{G} \\mathbb {E} \\left[ \\left(\\mathbf {1} \\left(I _ {t} ^ {(1)} = i\\right) - \\mathbf {1} \\left(I _ {t} ^ {(1)} = i ^ {*}\\right)\\right) ^ {2} \\right] = \\frac {p _ {i} + p _ {i} ^ {*}}{G} \\geq \\frac {1}{2 K G}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.853, + 0.885, + 0.898 + ], + "angle": 0, + "content": "When \\(i \\neq K\\), noted that \\(\\left(\\theta_{i}^{(t+1)} - \\theta_{i}^{(t)}\\right) - \\left(\\theta_{i^{*}}^{(t+1)} - \\theta_{i^{*}}^{(t)}\\right) > \\left(\\theta_{i}^{(t+1)} - \\theta_{i}^{(t)}\\right) > 0\\). Therefore, a similar bound can show that \\(\\mathbb{E}[\\Delta_{i,i^{*},t}^{2}] > \\frac{1}{2KG}\\). This then concludes the proof with \\(C_{1} = \\eta / 2KG\\) and \\(C_{2} = \\sqrt{G}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.866, + 0.903, + 0.883, + 0.916 + ], + "angle": 0, + "content": "□" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.508, + 0.954 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.431, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.097, + 0.59, + 0.114 + ], + "angle": 0, + "content": "C.4 Diversity Never Improves with KL regularization" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.126, + 0.886, + 0.159 + ], + "angle": 0, + "content": "Theorem C.8 (Diversity Preservation under KL Regularization). With \\( p_0 \\) as the initial policy and KL regularization hyperparameter \\( \\beta > 0 \\), if the REINFORCE process converges to policy \\( p^* \\). Then, \\( p^* \\) satisfies:" + }, + { + "type": "equation", + "bbox": [ + 0.324, + 0.164, + 0.673, + 0.204 + ], + "angle": 0, + "content": "\\[\n\\frac {p ^ {*} (i)}{\\sum_ {j = 1} ^ {K} p ^ {*} (j)} = \\frac {p _ {0} (i)}{\\sum_ {j = 1} ^ {K} p _ {0} (j)} \\quad \\forall i \\in \\{1, \\dots , K \\}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.21, + 0.886, + 0.24 + ], + "angle": 0, + "content": "Consequently, the distribution over the optimal arms under \\( p^* \\) matches the initial distribution \\( p_0 \\) restricted to these arms and renormalized." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.257, + 0.884, + 0.289 + ], + "angle": 0, + "content": "Proof. 
Using policy gradient theorem, we know that the converged policy \\( p^* \\) and corresponding parameter \\( \\theta^* \\) satisfy that," + }, + { + "type": "equation", + "bbox": [ + 0.349, + 0.295, + 0.649, + 0.341 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {\\theta} \\left[ \\sum_ {i = 1} ^ {K + 1} r _ {i} p _ {i} + \\beta \\mathrm {K L} \\left(p | p ^ {0}\\right) \\right] \\Bigg | _ {\\theta = \\theta^ {*}} = 0\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.354, + 0.359, + 0.371 + ], + "angle": 0, + "content": "This then suggests that for any \\( k \\)" + }, + { + "type": "equation", + "bbox": [ + 0.283, + 0.378, + 0.715, + 0.422 + ], + "angle": 0, + "content": "\\[\nr _ {k} p _ {k} ^ {*} - p _ {k} ^ {*} \\sum_ {i = 1} ^ {K + 1} r _ {i} ^ {*} p _ {i} ^ {*} + \\beta \\sum_ {i = 1} ^ {K + 1} \\nabla_ {\\theta_ {k}} [ p _ {i} \\log p _ {i} - p _ {i} \\log p _ {i} ^ {0} ] = 0\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.436, + 0.267, + 0.452 + ], + "angle": 0, + "content": "This is equivalent to" + }, + { + "type": "equation", + "bbox": [ + 0.232, + 0.459, + 0.766, + 0.502 + ], + "angle": 0, + "content": "\\[\nr _ {k} p _ {k} ^ {*} - p _ {k} ^ {*} \\sum_ {i = 1} ^ {K + 1} r _ {i} ^ {*} p _ {i} ^ {*} + \\beta \\sum_ {i = 1} ^ {K + 1} (\\mathbf {1} (i = k) - p _ {k} ^ {*}) p _ {i} ^ {*} (\\log p _ {i} ^ {*} + 1 - \\log p _ {i} ^ {0}) = 0\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.517, + 0.206, + 0.534 + ], + "angle": 0, + "content": "Simplifying" + }, + { + "type": "equation", + "bbox": [ + 0.23, + 0.559, + 0.767, + 0.603 + ], + "angle": 0, + "content": "\\[\nr _ {k} + \\beta (\\log p _ {k} ^ {*} + 1 - \\log p _ {0}) = \\sum_ {i = 1} ^ {K + 1} r _ {i} ^ {*} p _ {i} ^ {*} + \\beta \\sum_ {i = 1} ^ {K + 1} p _ {i} ^ {*} (\\log p _ {i} ^ {*} + 1 - \\log p _ {i} ^ {0})\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.619, + 0.886, + 0.658 + ], + "angle": 0, + "content": "For all \\( k \\in [K] \\), we know that \\( r_k 
\\) is equivalent, therefore, \\( \\frac{p_k^*(i)}{p_0^*(i)} \\) is a constant for \\( k \\in [K] \\), concluding our proof." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.675, + 0.314, + 0.691 + ], + "angle": 0, + "content": "C.5 Technical Lemma" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.705, + 0.456, + 0.722 + ], + "angle": 0, + "content": "Lemma C.9. For \\(x\\in \\mathbb{R}\\) \\(|x| < C\\) , it holds that" + }, + { + "type": "equation", + "bbox": [ + 0.404, + 0.729, + 0.594, + 0.748 + ], + "angle": 0, + "content": "\\[\n\\exp (x) > 1 + x + A _ {C} x ^ {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.755, + 0.303, + 0.779 + ], + "angle": 0, + "content": "here \\(A_{C} = \\frac{\\exp(-C) + C - 1}{C^{2}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.793, + 0.729, + 0.815 + ], + "angle": 0, + "content": "Proof. Define \\( g(x) = \\frac{\\exp(x) - 1 - x}{x^2} \\), this function monotonically increases when \\( x < 0 \\)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.508, + 0.955 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.096, + 0.448, + 0.117 + ], + "angle": 0, + "content": "D Open-Thoughts Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.132, + 0.885, + 0.18 + ], + "angle": 0, + "content": "We finetune Qwen2.5-7B-Instruct over OpenThoughts-114k for 5 epochs using BF16 and AdamW and hyperparameters lr=1e-5, bs=128, warmup=150 steps. We sample 40 reasoning traces with temperature set to 0.7 for each of the 30 problems in AIME24. Then we evaluate the following quantities." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.402, + 0.193, + 0.589, + 0.209 + ], + "angle": 0, + "content": "Competition Math (AIME24)" + }, + { + "type": "image", + "bbox": [ + 0.195, + 0.218, + 0.396, + 0.368 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.401, + 0.218, + 0.599, + 0.367 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.605, + 0.218, + 0.804, + 0.368 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.381, + 0.887, + 0.43 + ], + "angle": 0, + "content": "Figure 10: Pass@K Evaluated on AIME24 over OpenThoughts-114K SFT checkpoints. We plot the expected Pass@K ± SD. Note that improvements in Pass@K slows down while Pass@1 improves at a constant rate. Furthermore, the confidence interval of Pass@1 widens, meaning the variance increases during SFT." + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.45, + 0.268, + 0.562 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.272, + 0.45, + 0.422, + 0.562 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.426, + 0.451, + 0.576, + 0.561 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.58, + 0.451, + 0.728, + 0.561 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.734, + 0.451, + 0.882, + 0.561 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.574, + 0.886, + 0.621 + ], + "angle": 0, + "content": "Figure 11: Histogram of Pass@1 over AIME24. Variance of Pass@1 increases over finetuning on OpenThoughts-114K. We note that since AIME24 only has 30 questions, the density plot may not be completely reliable." 
+ }, + { + "type": "image", + "bbox": [ + 0.272, + 0.645, + 0.725, + 0.833 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.848, + 0.885, + 0.881 + ], + "angle": 0, + "content": "Figure 12: We plot the average number of unique answers sampled over the total number samples i.e. \\( \\left|\\left\\{y_{i}\\right\\}_{i=1}^{n}\\right| / n \\). Model samples less diverse number of answers as SFT progresses." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.095, + 0.431, + 0.117 + ], + "angle": 0, + "content": "E Interpolation Coefficients" + }, + { + "type": "image_caption", + "bbox": [ + 0.135, + 0.141, + 0.312, + 0.155 + ], + "angle": 0, + "content": "WiSE-Step672 on MATH500" + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.156, + 0.294, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.326, + 0.142, + 0.499, + 0.154 + ], + "angle": 0, + "content": "WiSE-Step672 on MATH500" + }, + { + "type": "image", + "bbox": [ + 0.307, + 0.155, + 0.482, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.511, + 0.142, + 0.687, + 0.154 + ], + "angle": 0, + "content": "WiSE-Step672 on MATH500" + }, + { + "type": "image", + "bbox": [ + 0.495, + 0.154, + 0.667, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.699, + 0.142, + 0.874, + 0.154 + ], + "angle": 0, + "content": "WiSE-Step672 on MATH500" + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.155, + 0.855, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.138, + 0.286, + 0.312, + 0.298 + ], + "angle": 0, + "content": 
"WiSE-Step896 on MATH500" + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.298, + 0.294, + 0.425 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.325, + 0.286, + 0.499, + 0.298 + ], + "angle": 0, + "content": "WiSE-Step896 on MATH500" + }, + { + "type": "image", + "bbox": [ + 0.307, + 0.298, + 0.482, + 0.425 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.286, + 0.686, + 0.298 + ], + "angle": 0, + "content": "WiSE-Step896 on MATH500" + }, + { + "type": "image", + "bbox": [ + 0.495, + 0.298, + 0.668, + 0.425 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.7, + 0.286, + 0.874, + 0.298 + ], + "angle": 0, + "content": "WiSE-Step896 on MATH500" + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.298, + 0.855, + 0.425 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.135, + 0.43, + 0.314, + 0.442 + ], + "angle": 0, + "content": "WiSE-Step1120 on MATH500" + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.442, + 0.299, + 0.569 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.321, + 0.43, + 0.501, + 0.442 + ], + "angle": 0, + "content": "WiSE-Step1120 on MATH500" + }, + { + "type": "image", + "bbox": [ + 0.307, + 0.442, + 0.487, + 0.569 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.51, + 0.43, + 0.69, + 0.442 + ], + "angle": 0, + "content": "WiSE-Step1120 on MATH500" + }, + { + "type": "image", + "bbox": [ + 0.495, + 0.442, + 0.674, + 0.569 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.696, + 0.43, + 0.878, + 0.442 + ], + "angle": 0, + "content": "WiSE-Step1120 on MATH500" + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.442, + 0.861, + 0.569 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.582, + 0.885, + 0.631 + ], + 
"angle": 0, + "content": "Figure 13: Pass@1 versus Pass@K of WiSEFT of Qwen-2.5-0.5B trained and evaluated on MATH500. We interpolate between model \\(\\pmb{w}_0\\) at Step 112 with \\(\\pmb{w}_t\\) for \\(t\\in [672,896,1120]\\) as \\(\\delta \\pmb{w}_0 + (1 - \\delta)\\pmb{w}_t\\) where \\(\\delta \\in [0.1,0.9]\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.508, + 0.954 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.112, + 0.314, + 0.255 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.317, + 0.112, + 0.5, + 0.255 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.112, + 0.687, + 0.255 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.112, + 0.874, + 0.255 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.124, + 0.258, + 0.307, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.312, + 0.259, + 0.493, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.259, + 0.681, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.687, + 0.259, + 0.869, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.405, + 0.3, + 0.547 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.312, + 0.405, + 0.487, + 0.547 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.405, + 0.689, + 0.547 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.687, + 0.405, + 0.879, + 0.547 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.552, + 0.3, + 0.692 
+ ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.312, + 0.552, + 0.487, + 0.692 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.552, + 0.676, + 0.692 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.687, + 0.552, + 0.879, + 0.692 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.698, + 0.3, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.312, + 0.698, + 0.487, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.698, + 0.676, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.687, + 0.698, + 0.879, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.853, + 0.885, + 0.903 + ], + "angle": 0, + "content": "Figure 14: Pass@1 versus Pass@K of WiSEFT of Gemma-2-2B trained and evaluated on GSM8K. We interpolate between model \\(\\pmb{w}_0\\) at Step 171 with \\(\\pmb{w}_t\\) for \\(t \\in [342, 684, 1026, 1368, 1710]\\) as \\(\\delta \\pmb{w}_0 + (1 - \\delta) \\pmb{w}_t\\) where \\(\\delta \\in [0.05, 0.9]\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.096, + 0.475, + 0.117 + ], + "angle": 0, + "content": "F Measuring Diversity of Traces" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.132, + 0.884, + 0.163 + ], + "angle": 0, + "content": "We measure the diversity of the 100 sampled traces of Gemma-2-2B across GSM8k test. We measure diversity in terms of 3 different measures." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.176, + 0.884, + 0.208 + ], + "angle": 0, + "content": "Output Diversity The cardinality or number of unique answers in the set of all model outputs \\( \\left|\\{\\hat{y}_1,\\hat{y}_2,\\dots ,\\hat{y}_n\\}\\right| \\) over the total number of traces." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.211, + 0.886, + 0.273 + ], + "angle": 0, + "content": "Operation Diversity In GSM8k, each intermediate step consists of basic arithmetic operations, e.g. \\( 5 + 3 = 8 \\). We may simply map each of the traces to the sequence of arithmetic operations the model steps through, i.e. \\( r_i \\rightarrow [o_1, o_2, \\ldots, o_t] \\). This mapping is extracted by code. Then, given this set, we measure unique sequence of operations over the number of total traces." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.276, + 0.886, + 0.309 + ], + "angle": 0, + "content": "Semantic Diversity We measure the similarity of trace using cosine similarities between the text-embeddings (Bilmes, 2022; Yu et al., 2023)." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.322, + 0.482, + 0.34 + ], + "angle": 0, + "content": "F.1 Does temperature increase diversity?" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.351, + 0.831, + 0.368 + ], + "angle": 0, + "content": "Temperature does increase diversity, but it also increases the chances of sampling outlier answers." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.392, + 0.211, + 0.606, + 0.226 + ], + "angle": 0, + "content": "Diversity Across SFT [T=0.8]" + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.238, + 0.327, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.339, + 0.238, + 0.542, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.55, + 0.238, + 0.75, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.758, + 0.228, + 0.866, + 0.371 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.392, + 0.394, + 0.606, + 0.409 + ], + "angle": 0, + "content": "Diversity Across SFT [T=1.0]" + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.422, + 0.327, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.422, + 0.543, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.551, + 0.422, + 0.75, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.758, + 0.412, + 0.866, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.393, + 0.578, + 0.606, + 0.592 + ], + "angle": 0, + "content": "Diversity Across SFT [T=1.5]" + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.605, + 0.327, + 0.75 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.605, + 0.542, + 0.75 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.551, + 0.598, + 0.75, + 0.75 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.758, + 0.596, 
+ 0.866, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.769, + 0.885, + 0.8 + ], + "angle": 0, + "content": "Figure 15: Diversity of traces sampled with Temperature \\(\\in\\) {0.8, 1.0, 1.5} for Gemma-2-2B SFT checkpoints on GSM8k" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.509, + 0.954 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.096, + 0.884, + 0.131 + ], + "angle": 0, + "content": "F.2 How well do token-level diverse decoding strategies compare with optimal strategy with oracle?" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.143, + 0.886, + 0.207 + ], + "angle": 0, + "content": "Hyperparameter Tuning Details We grid search for optimal temperature for all baselines over \\( T = [0.8, 1.0, 1.2, 1.5, 1.8] \\). For nucleus, we choose the best cutoff threshold between \\([0.8, 0.9, 0.95]\\). For min-p, we choose the best probability threshold between \\([0.01, 0.05, 0.1]\\). For tokenwise top-k, we choose best k between \\([12, 25, 50]\\)." + }, + { + "type": "table", + "bbox": [ + 0.292, + 0.217, + 0.709, + 0.326 + ], + "angle": 0, + "content": "
Decoding StrategyPass@2Pass@4Pass@8
Naive0.5650.6660.760
Nucleus0.5660.6680.757
Min-p0.5660.6680.760
Top-k0.5630.6660.756
Top-k w/Oracle0.7600.8320.901
" + }, + { + "type": "table_caption", + "bbox": [ + 0.182, + 0.334, + 0.816, + 0.352 + ], + "angle": 0, + "content": "Table 2: Best Pass@k of Sampling Strategies for Qwen-2.5-0.5B over SFT checkpoints" + }, + { + "type": "table", + "bbox": [ + 0.292, + 0.38, + 0.709, + 0.488 + ], + "angle": 0, + "content": "
Decoding StrategyPass@2Pass@4Pass@8
Naive0.5470.6480.737
Nucleus0.5280.6170.694
Min-p0.5500.6550.744
Top-k0.5380.6460.738
Top-k w/Oracle0.7300.8140.878
" + }, + { + "type": "table_caption", + "bbox": [ + 0.194, + 0.497, + 0.804, + 0.515 + ], + "angle": 0, + "content": "Table 3: Pass@k of Sampling Strategies for Qwen-2.5-0.5B at Last SFT Checkpoint" + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.538, + 0.879, + 0.697 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.71, + 0.886, + 0.789 + ], + "angle": 0, + "content": "Figure 16: Pass@K over different Min-P thresholds \\(\\gamma \\in [0,0.3]\\) and temperatures \\(T\\in [1,1.6]\\) for Gemma2-2B finetuned on GSM8K. Generally, no min-p threshold paired with high temperature \\(\\mathrm{T} = 1.6\\) (in light green) is able to surpass the Pass@1 of \\(\\mathrm{T} = 1\\) with best min-p threshold (in orange). In other words, unlike WiSE-FT which increases both Pass@1 and Pass@K, Pass@1 tends to still decrease for the diverse decoding strategy of applying min-p with high temperature." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.508, + 0.955 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.096, + 0.307, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.31, + 0.097, + 0.498, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.097, + 0.691, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.097, + 0.882, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.246, + 0.741, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.209, + 0.275, + 0.789, + 0.294 + ], + "angle": 0, + "content": "Figure 17: Pass@k of Gemma-2-2B GSM8k Naive Sampling with Replacement" + }, + { + "type": "image", + 
"bbox": [ + 0.118, + 0.307, + 0.311, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.308, + 0.501, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.309, + 0.691, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.695, + 0.309, + 0.882, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.339, + 0.462, + 0.727, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.25, + 0.492, + 0.748, + 0.51 + ], + "angle": 0, + "content": "Figure 18: Pass@k of Gemma-2-2B GSM8k Oracle Top K Sampling" + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.533, + 0.307, + 0.685 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.31, + 0.534, + 0.497, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.534, + 0.69, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.69, + 0.534, + 0.882, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.318, + 0.697, + 0.709, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.202, + 0.727, + 0.795, + 0.746 + ], + "angle": 0, + "content": "Figure 19: Pass@k of Qwen-2.5-0.5B GSM8k Naive Sampling with Replacement" + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.767, + 0.591, + 0.785 + ], + "angle": 0, + "content": "F.3 Diversity Comparison Between SFT and WiSE-FT" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.154, + 0.312, + 0.304 + ], + "angle": 0, + "content": 
null + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.154, + 0.5, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.154, + 0.692, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.155, + 0.882, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.324, + 0.316, + 0.71, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.244, + 0.346, + 0.755, + 0.365 + ], + "angle": 0, + "content": "Figure 20: Pass@k of Qwen-2.5-0.5B GSM8k Oracle Top K Sampling" + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.489, + 0.816, + 0.637 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.183, + 0.644, + 0.816, + 0.793 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.81, + 0.885, + 0.856 + ], + "angle": 0, + "content": "Figure 21: Operation, Semantic, and Answer Diversity of Gemma-2-2B checkpoints of SFT over GSM8K versus the corresponding WiSE-FT variants (with the earliest checkpoint). We decode with temperature set to 1.0." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.319, + 0.819, + 0.625 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.643, + 0.885, + 0.688 + ], + "angle": 0, + "content": "Figure 22: Operation, Semantic, and Answer Diversity of Gemma-2-2B checkpoints of SFT over GSM8K versus the corresponding WiSE-FT variants (with the earliest checkpoint). We decode with temperature set to 1.6." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.096, + 0.378, + 0.114 + ], + "angle": 0, + "content": "G Best of K Evaluation" + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.14, + 0.307, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.309, + 0.141, + 0.498, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.141, + 0.691, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.693, + 0.141, + 0.882, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.382, + 0.248, + 0.617, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.281, + 0.886, + 0.314 + ], + "angle": 0, + "content": "Figure 23: Best@K performance on MATH500 with ORM verifier, comparing different SFT and WiSE-FT checkpoints of Qwen-2.5-0.5B for \\( K = 2,4,8,32 \\)" + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.336, + 0.379, + 0.49 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.382, + 0.336, + 0.63, + 0.49 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.635, + 0.337, + 0.882, + 0.49 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.501, + 0.645, + 0.528 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.532, + 0.373, + 0.685 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.385, + 0.532, + 0.627, + 0.685 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.639, + 0.532, + 0.88, + 0.685 + ], + "angle": 0, 
+ "content": null + }, + { + "type": "image", + "bbox": [ + 0.347, + 0.698, + 0.647, + 0.723 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.738, + 0.886, + 0.784 + ], + "angle": 0, + "content": "Figure 24: Best@K performance on MATH500 with ORM (Top) and Majority Vote (Bottom) for early, middle, and late SFT checkpoints and WiSE-FT counterparts, showing Qwen-2.5-0.5B's scaling across K values." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.806, + 0.805, + 0.827 + ], + "angle": 0, + "content": "H Diversity Collapse and WiSE-FT Results for the Coding Task" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.842, + 0.886, + 0.92 + ], + "angle": 0, + "content": "To test whether coding tasks exhibit the same diversity collapse observed in reasoning benchmarks, we fine-tuned the Qwen2.5-coder-0.5B model for 10 epochs on the Magicoder-Evol-Instruct-110K dataset, following the Stage 2 SFT recipe from OpenCoder LLM. We then applied WiSE-FT by interpolating the weights of the second SFT checkpoint with the initial model using interpolation ratio 0.5. Both the original SFT checkpoints and their WiSE-FT counterparts were evaluated on HumanEval for pass@k." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.508, + 0.955 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.095, + 0.303, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.308, + 0.095, + 0.497, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.096, + 0.688, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.695, + 0.096, + 0.882, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.379, + 0.205, + 0.615, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.238, + 0.885, + 0.27 + ], + "angle": 0, + "content": "Figure 25: Best@K performance on MATH500 with majority voting, comparing different SFT and WiSE-FT checkpoints of Qwen-2.5-0.5B for \\( K = 2, 4, 8, 32 \\)" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.295, + 0.884, + 0.343 + ], + "angle": 0, + "content": "We found that, much like in mathematical reasoning tasks, SFT on coding data indeed suffers from diversity collapse: although pass@1 steadily improves over epochs, pass@k begins to deteriorate. And WiSE-FT still improves performance and mitigates the diversity collapse." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.346, + 0.356, + 0.658, + 0.37 + ], + "angle": 0, + "content": "HumanEval - Pass@k Across SFT Checkpoints" + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.377, + 0.365, + 0.52 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.378, + 0.622, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.379, + 0.88, + 0.52 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.533, + 0.825, + 0.55 + ], + "angle": 0, + "content": "Figure 26: Pass@K performance of SFT checkpoints on HumanEval (temperature = 1.0)." + }, + { + "type": "image_caption", + "bbox": [ + 0.3, + 0.572, + 0.703, + 0.586 + ], + "angle": 0, + "content": "HumanEval - Pass@k Across Checkpoints (SFT vs WiSE-FT)" + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.594, + 0.365, + 0.736 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.375, + 0.594, + 0.622, + 0.735 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.595, + 0.879, + 0.735 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.117, + 0.748, + 0.879, + 0.765 + ], + "angle": 0, + "content": "Figure 27: Comparison of pass@K for SFT checkpoints and their WiSE-FT counterparts at \\( k = 1 \\), 16, 64." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.115, + 0.033, + 0.432, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.162, + 0.323, + 0.877, + 0.343 + ], + "angle": 0, + "content": "HumanEval - Last Checkpoint (1700) Comparison: SFT vs WiSE-FT" + }, + { + "type": "image", + "bbox": [ + 0.12, + 0.351, + 0.868, + 0.659 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.674, + 0.825, + 0.692 + ], + "angle": 0, + "content": "Figure 28: Pass@K performance of the final SFT checkpoint versus its WiSE-FT variant." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "33" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_origin.pdf b/data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7b47289f93fe91efa4e8d4d53922c19a73656c01 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b472ea59c5763afc03c43e3f3dd60e3fdf06da4497077fceea24202074486ce2 +size 5283981 diff --git a/data/2025/2504_10xxx/2504.10478/full.md b/data/2025/2504_10xxx/2504.10478/full.md new file mode 100644 index 0000000000000000000000000000000000000000..a2d3b441cbd54cb7c017eb8b94ebc2946a742ff8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/full.md @@ -0,0 +1,960 @@ +# Weight Ensembling Improves Reasoning in Language Models + +Xingyu Dang\*,1 Christina Baek\*,2 Kaiyue Wen3 Zico Kolter2 Aditi Raghunathan2 + +$^{1}$ Tsinghua University $^{2}$ Carnegie Mellon University $^{3}$ Stanford University + +$\text{品}$ 
dangxy20@mails.tsinghua.edu.cn,kbaek@andrew.cmu.edu + +# Abstract + +We investigate a failure mode that arises during the training of reasoning models, where the diversity of generations begins to collapse, leading to suboptimal test-time scaling. Notably, the Pass@1 rate reliably improves during supervised finetuning (SFT), but Pass@k rapidly deteriorates. Surprisingly, a simple intervention of interpolating the weights of the latest SFT checkpoint with an early checkpoint, otherwise known as WiSE-FT, almost completely recovers Pass@k while also improving Pass@1. The WiSE-FT variant achieves better test-time scaling (Best@k, majority vote) and achieves superior results with less data when tuned further by reinforcement learning. Finally, we find that WiSE-FT provides complementary performance gains that cannot be achieved only through diversity-inducing decoding strategies, like temperature scaling. We formalize a bias-variance tradeoff of Pass@k with respect to the expectation and variance of Pass@1 over the test distribution. We find that WiSE-FT can reduce bias and variance simultaneously, while temperature scaling inherently trades off between bias and variance. + +# 1 Introduction + +Recent advances in large language models (LLMs) have showcased their remarkable ability to perform complex reasoning, yet these successes often hinge on test-time scaling strategies (Lightman et al., 2023; Snell et al., 2024; Wu et al., 2024). In many applications, such as math problems, puzzles, and logical reasoning, LLMs employ a verification framework where it is significantly easier for the model to verify a candidate solution than to generate one from scratch. This distinction has given rise to strategies that sample multiple "reasoning traces" or sequences of reasoning steps during inference, selecting the best final guess through an outcome reward model (ORM) or majority vote. 
In this setting, an upper bound on the performance a model could achieve is measured by Pass@K, or the probability that at least one out of $K$ independently sampled reasoning traces is correct. + +Unfortunately, while the standard training pipeline of supervised finetuning (SFT) followed by reinforcement learning (RL) dependably improves Pass@1 for reasoning, Pass@K tends to drop early into finetuning (Cobbe et al., 2021; Chow et al., 2024a; Chen et al., 2025). This mismatch arises from a symptom of finetuning called diversity collapse, where overtuned models yield less diverse generations. This is detrimental to Pass@K since the model wastes $K$ attempts on only a handful of guesses. In fact, by analyzing the model's error rate i.e., 1 - Pass@1, across the test distribution, we derive a Pass@K bias-variance trade-off. To improve expected test Pass@K, one can either reduce the bias which is the expected error rate or how much the model's error rate varies across problems. The latter term is connected to diversity - more diversity allows models to hedge and do uniformly well across all test questions. In particular, during SFT, Pass@1 improves (bias ↓) at the cost of diversity collapse (variance ↑). + +Surprisingly, common ways of alleviating diversity collapse, such as early stopping at peak Pass@K or decoding with high temperature, suffer from the reverse trade-off: diversity improves (variance $\downarrow$ ) at the cost of overall Pass@1 degrading (bias $\uparrow$ ). Consequently, in this paper we are concerned with a central question: + +Figure 1: Pass@k of WiSE-FT versus SFT on GSM8k Gemma-2-2B supervised finetuned and evaluated on GSM8k. At each SFT timestep $t$ , we evaluate Pass@k of checkpoint $w_{t}$ (in dashed) with its WiSE-FT variant $1/2 \cdot w_{t} + 1/2 \cdot w_{0}$ (in solid), where traces are independently sampled with temperature $T = [0.7, 1.0, 1.3, 1.6]$ . 
+![](images/9a00ddd660bf8f8eeda9cd85892cb8a7e3465e904ab2b4e6cf073e2f5f617379.jpg) +--- SFT T=0.7 --- SFT T=1.0 WiSE-FT T=1.0 SFT T=1.3 SFT T=1.6 + + ![](images/3536f4b1df50cd66ddd3bfbccb80c35fc10834c00925c31728553b31b2fbfd2a.jpg) + + ![](images/19a01a99597df80ac8df614f4c1787a0a5b99ab4663cc34196f872471af91463.jpg) + + Is it possible to simultaneously improve both Pass@1 and Pass@K, thereby overcoming the bias-variance tradeoff inherent in current approaches? + + In our work, we introduce a simple, scalable and effective intervention that allows models to achieve both high Pass@K and Pass@1 across mathematical reasoning tasks GSM8k, MATH, and AIME. The specific technique we use is a variant of WiSE-FT (Wortsman et al., 2022) where we interpolate the weights of the latest SFT checkpoint $\boldsymbol{w}_t$ with an early checkpoint $w_0$ as $\boldsymbol{w}_{\mathrm{WiSE}(t)} = \frac{1}{2} \cdot \boldsymbol{w}_0 + \frac{1}{2} \cdot \boldsymbol{w}_t$ . Our key finding is that WiSE-FT successfully merges the diverse sampling capabilities of earlier checkpoints while retaining or surpassing the Pass@1 of later checkpoints. In Figure 1, we observe that the WiSE-FT model achieves both higher Pass@K and Pass@1 with more SFT steps $t$ , unlike naive SFT which suffers from an early decay in Pass@K. Moreover, the gains with WiSE-FT are unachievable by early-stopping or diversity-aware decoding alone. + + Thus, we propose a new paradigm of training reasoning models: 1.) Train extensively using SFT as long as Pass@1 improves, 2.) Perform WiSE-FT with an earlier SFT checkpoint, 3.) Continue tuning the WiSE-FT variant using RL. Overall, the WiSE-FT model has the following immediate practical benefits: + + - Better Test-Time Scaling Across all datasets and base models, the WiSE-FT variant achieves the highest performance with test-time scaling (Majority Vote, ORM) compared to an overtrained SFT model paired with diversity-aware decoding.
+- Better Reinforcement Learning Since RL uses self-generated data to tune models, to generalize reliably, it is important for generations to provide sufficient learning signal while also having high coverage over the data space. We find that continued RL training starting from WiSE-FT weights achieves superior results with less synthetic data compared to initializing RL from the last SFT checkpoint and even early-stopped SFT. + +In summary, we provide a comprehensive analysis of how reasoning models suffer from diversity collapse during SFT and its negative downstream impact during RL and test-time scaling. We first discuss our WiSE-FT findings in §4. Motivated by this discovery, we investigate two fundamental questions. First, we investigate diversity collapse during SFT and RL of reasoning models in §5. Diversity collapse not only impacts the model's ability to attempt different guesses. In fact, we make an even stronger observation - the generations of reasoning models converge towards a single reasoning trace for each test question. We theoretically prove that standard RL algorithms (i.e., REINFORCE and GRPO) fail to recover lost diversity in a simplified discrete bandit setting. + +Second, we formalize the competing goals of Pass@1 and Pass@K as a bias-variance trade-off in §6. We empirically measure and compare the bias and variance of WiSE-FT versus early-stopping versus high temperature decoding. Notably, only WiSE-FT reduces both bias and variance. We conclude with a remark on the limitations of decoding strategies such as top-k (Shao et al., 2017), nucleus (Holtzman et al., 2020), and min-p (Nguyen et al., 2024), at eliciting the maximum capabilities with test-time scaling from current reasoning models. + +# 2 Related Works + +Diversity collapse with SFT: The standard pipeline for enhancing reasoning in LLMs involves an initial phase of supervised fine-tuning (SFT) followed by reinforcement learning (RL) (Guo et al., 2025; Setlur et al., 2024). 
SFT is critical for instilling interpretable and readable reasoning chains and ensuring that the model adheres to a consistent rollout template (Guo et al., 2025). However, a number of recent works have identified critical pitfalls of SFT that hinder the model's ability to explore and ultimately its overall problem solving ability. Notably, Cobbe et al. (2021) observe diversity collapse when finetuning on the GSM8k training dataset, during which the Pass@1 continuously improves whereas Pass@k starts to fall shortly into the training. A similar diversity collapse phenomenon also exists in the self-improvement setting with SFT (Song et al., 2024), and is theoretically investigated as the sharpening effect (Huang et al., 2024). This is not desirable as diverse sampling at inference is important for test-time scaling using majority voting (Wang et al., 2023) or reward model guided search (Setlur et al., 2024; Beeching et al., 2024). Yeo et al. (2025); Chu et al. (2025) attribute this behavior to overfitting, memorization of samples and overfixation to a template style leading to reduced generalization. In our work, we corroborate similar findings and propose ensembling over the course of SFT as a mitigation strategy. + + Mitigating diversity collapse: Given the importance of diversity for effectively scaling inference-time compute, several recent works have proposed auxiliary finetuning objectives and decoding strategies to mitigate diversity collapse. Li et al. (2025) regularize the SFT process using a game-theoretic framework that encourages sparse updates, thereby preserving output diversity. Zhang et al. (2024b) directly optimize for diversity during finetuning. Other approaches modify the finetuning procedure to directly optimize for Best-of-N sampling at inference time (Chow et al., 2024b; Sessa et al., 2024; Chen et al., 2025).
Another line of work focuses on inference-time decoding, explicitly encouraging diverse solutions through modified beam search strategies (Vijayakumar et al., 2018; Olausson et al., 2024; Chen et al., 2024; Beeching et al., 2024). Li et al. (2023) improve diversity during parallel decoding by appending curated prompts to the input. In formal reasoning settings, e.g., Lean, methods such as Monte Carlo tree search have been used to diversify intermediate reasoning steps, as demonstrated in AlphaProof (AlphaProof and AlphaGeometry teams, 2024). In this work, we identify a simple and complementary intervention during the finetuning process to maintain the diversity of generations. We especially care about enforcing diversity while preserving the overall accuracy of generations. + + # 3 Preliminaries and Experimental Setup + + # 3.1 Pass@k, Best@k, and Majority Vote + + Given a reasoning model $f(\cdot)$ , a decoding strategy $D$ , and problem $x$ , the model's solution is obtained by sampling a reasoning trace $r := [x, s^{(1)}, s^{(2)}, \dots, s^{(n)}, \hat{y}]$ consisting of a sequence of intermediate steps $s^{(i)}$ and a final guess $\hat{y}$ . Given $k$ independently sampled traces, Pass@K measures the probability that at least one guess matches the true answer $y$ : + + $$ + \operatorname{Pass@K}(x) = \mathbb{E}_{[\boldsymbol{r}_i]_{i=1}^{k} \sim D(f(x))}\left[\mathbb{1}\{\exists i \in [k] \ \text{s.t.} \ \hat{y}_i = y\}\right] = 1 - (1 - \rho_x)^{K} \tag{1} + $$ + + where $\rho_{x} = P(\hat{y} = y\mid x,f,D)$ is the Pass@1 or marginal probability of sampling the ground truth answer. Then $(1 - \rho_x)^K$ is the probability that all $K$ guesses are incorrect. We will refer to Pass@1 as $\rho_{x}$ interchangeably in our paper. + + In practice, test-time compute is scaled by selecting one of $K$ guesses either by an output reward model (ORM) or Majority Vote.
Then we can measure Best@K as + + $$ + \operatorname{Best@K}(x) = \mathbb{E}_{[\boldsymbol{r}_i]_{i=1}^{k} \sim D(f(x))}[\hat{y}_{i^{*}} = y] \quad \text{where} \quad i^{*} = \arg\max_{i \in [K]} \sum_{j=1}^{K} \mathbb{1}\left\{\hat{y}_i = \hat{y}_j\right\} \ \text{or} \ \operatorname{ORM}(\boldsymbol{r}_i) + $$ + + Notably, Pass@K is equivalent to Best@K using a perfect ORM verifier. As we will observe, WiSE-FT achieves both higher Pass@1 and Pass@K and this directly translates to achieving better Best@K with an ORM verifier and by Majority Vote. + + # 3.2 Weight-Space Ensembling (WiSE-FT) + + WiSE-FT is a weight-space ensembling technique proposed by Wortsman et al. (2022) to improve the out-of-distribution accuracy of finetuned models at no extra computational cost. In particular, while models tend to achieve better in-distribution performance after finetuning, they tend to be less robust to distribution shift. Surprisingly, by simply interpolating the weights of the finetuned model $\boldsymbol{w}_t$ with the pretrained weights $\boldsymbol{w}_0$ + + $$ + \boldsymbol{w}_{\mathrm{WiSE}(t)} = \delta \cdot \boldsymbol{w}_{0} + (1 - \delta) \cdot \boldsymbol{w}_{t} \tag{2} + $$ + + WiSE-FT can achieve best of both worlds: the out-of-distribution accuracy of models improves without incurring a drop in in-distribution accuracy. Similar to this philosophy, we apply weight ensembling to achieve both the diverse generation ability of early SFT checkpoints while maintaining the high Pass@1 accuracy of later SFT checkpoints. + + # 3.3 Training and Evaluation Pipeline + + The majority of our experiments are conducted on Gemma-2-2B and Qwen-2.5-0.5B. We perform SFT on a 30K subset of rephrased augmentations of GSM8k (Cobbe et al., 2021) and MATH (Hendrycks et al., 2021) in MetaMath40k (Yu et al., 2023) for 1710 steps or 10 epochs.
We then continue finetuning on another 30K subset of rephrased training questions from MetaMath using Group Relative Policy Optimization (GRPO) with a binary reward of the correctness of the model's final answer. Finally, we evaluate models on GSM8K and MATH500, respectively. To estimate the true Pass@K and Pass@1 marginalized over the distribution of sampled traces, we sample 100 reasoning traces per test example and average over them to estimate Pass@1, i.e. $\rho_{x}$ . Then to calculate Pass@K, we use the theoretical formula $1 - (1 - \rho_{x})^{K}$ in Equation 1. Unless noted otherwise, we employ a naive decoding strategy with top-p threshold 0.9, temperature $T = 0.8$ , and top-k with $K = 50$ . + +# 4 Improving Diverse Reasoning Capabilities by WiSE-FT + +We first carefully track Pass@K for $K \in \{1, 4, 32\}$ across the SFT trajectory of Qwen-2.5-0.5B and Gemma-2-2B. Similar to findings from Cobbe et al. (2021); Chen et al. (2025), we observe that Pass@1 continues to improve with longer SFT, whereas for larger $K = 4, 32$ , Pass@K tends to peak much earlier on in training (in Figure 1, 17, and 19). In other words, while later SFT checkpoints achieve higher Pass@1, earlier SFT checkpoint achieve higher Pass@K. This tradeoff in model selection is not ideal downstream for test-time scaling. + +Building upon this intuition, we propose weight ensembling between earlier and later SFT checkpoints. We apply a variant of WiSE-FT where instead of the pretrained model, we interpolate between the earliest SFT checkpoint (in our case, after 1 epoch of training) and the weights of later checkpoint. As shown in Figure 2, we observe a "sweet spot" of interpolation coefficients $\delta \in (0,1)$ where the WiSE-FT model achieves both higher Pass@1 than the last SFT model and higher Pass@K than the early SFT model. We will fix $\delta = 1/2$ , which generally performs decently for all of the datasets we've tested. 
In fact, after WiSE-FT $w_{\mathrm{WiSE}(t)}$ , both Pass@1 and Pass@k grow monotonically with SFT steps $t$ (see Figure 1). + +![](images/546890eae7b6307835210a35ac4546692989e395b2041aa92121b17022a37557.jpg) +Figure 2: Pass@1 vs. Pass@K across Interpolation Coefficients We perform WiSEFT with $\delta \in [0.1, 0.9]$ between the first and last checkpoints of model (in legend) finetuned on GSM8K, MATH, and OpenThoughts-114K, then evaluate on GSM8K, MATH500, and AIME24, respectively. Early SFT model observe higher Pass@K (y-axis) while later SFT model observes higher Pass@1 (x-axis). The interpolated model observe best of both metrics. + +![](images/6c5fa0e260a30825c41c9e0eaa75949a07f90449475e4e78fc1c1a49f05915b4.jpg) + +![](images/5137cb2f59693ab411059d8208f1a96ff60109df3970efcaaad046b30b11e30c.jpg) + +Better Test-Time Scaling This boost in both Pass@1 and Pass@K directly translates to better performance with test-time scaling. We measure Best@K by Majority Vote and by selecting the reasoning trace with highest reward using an off-the-shelf ORM RLHFlow/Llama3.1-8B-PRM-Deepseek-Data (Xiong et al., 2024). We evaluate the performance of the last SFT checkpoint with highest Pass@1 versus the corresponding WiSE-FT variant with $\delta = 1/2$ . In Figure 3, we see that the performance gap on MATH500 between the final Gemma-2-2B SFT checkpoint and Wise-FT model widens with larger $K$ . The WiSE-FT model achieves $5 - 7\%$ better performance with test-time scaling. + +Better RL Scaling WiSE-FT's ability to achieve both high Pass@1 and Pass@K is particularly advantageous for continued RL training where models are further trained by policy gradient methods using self-generated data. In particular, WiSE-FT is able to generate data rich in learning signal (high Pass@1) while still having high coverage over the data space (high Pass@K). 
We continue training on rephrased training questions of GSM8K and MATH using GRPO paired with a binary reward of the correctness of the final guess. Across runs, we observe that continued RL training starting from the final WiSE-FT model improves performance more stably than finetuning starting from the final SFT checkpoint. Notably the final SFT checkpoint suffers low coverage over the data space, causing Pass@1 to improve slowly. We also try continued RL training from an earlier SFT checkpoint with peak Pass@4 performance. While RL scales better over the early SFT checkpoint in comparison to the final checkpoint, the performance still remains subpar compared to WiSE-FT. + + # 4.1 General Purpose Reasoning Models + + So far we have studied the effect of WiSE-FT on models tuned on reasoning data for the same specific reasoning task (e.g., train on GSM8k and evaluate on GSM8k). We've additionally tested how well our findings generalize to models trained on general purpose reasoning datasets and tested on an out-of-distribution reasoning task. We take Qwen2.5-7B-Instruct and SFT for 5 epochs on OpenThoughts-114k, a high-quality synthetic dataset of math, science, and coding questions paired with DeepSeek-R1 completions, then evaluate its performance on AIME24 competition problems (with ASY code for figures from Muennighoff et al. (2025)). In this setting, the Pass@K trends during SFT are more subtle. We still observe diversity collapse in Figure 12, but the effect is not strong enough for Pass@K to drop back down. However, we observe that the rate at which Pass@K improves for $K \in \{16,32\}$ slows down early while Pass@1 grows at a constant rate (Figure 10).
We then perform WiSE-FT between the final and earlier checkpoint with + +![](images/4784db23bc7951ee2fc6656f65f8c2d3d009a5771392f48ebe1e24aa859028e1.jpg) + +![](images/281b7e3b04727e31eb48c9f9eb0dac923c0ed6c74f21659a0a4d939eab7dbcdc.jpg) + +![](images/e3b0978da07922462da538218a0295754fb4a5a2ab33dcf01d466532d3e49fa5.jpg) + +![](images/38159ef7ae6f51b78b1f51b27ac07d7019b9006e97ddbd5054b02d32b076acac.jpg) +(a) +(b) + +![](images/e8032220c0101b093c7abbf09b817da62b4a68f6faf283ab9fe582fdf70c1c5e.jpg) +(c) +Figure 3: Downstream Advantages of WiSE-FT: (a) Best@K on MATH500 of the final SFT Gemma2-2B checkpoint and its WiSE-FT counterpart. (b) Pass@K on AIME24 WiSE-FT after SFT on general purpose reasoning dataset OpenThoughts-114k achieves higher Pass@K on AIME24. (c) RL Scaling Gemma and Qwen SFT checkpoints further tuned by GRPO on GSM8K and MATH, respectively. RL from the final WiSE-FT model achieves higher Pass@1 with less data compared to GRPO starting from both early and late SFT checkpoints. + +higher diversity. We choose early checkpoint at epoch 3 where improvements in Pass@K begin to slow. Similarly, we observe that WiSE-FT improves both Pass@1 and Pass@K in Figure 2. + +# 5 Diversity Collapse during Finetuning + +In previous sections we alluded to the phenomenon where $\mathrm{Pass}@\mathrm{K}$ decreases because SFT and RL induces diversity collapse in reasoning traces. To verify this hypothesis, we sample 100 traces per test GSM8k problem and measure diversity using three metrics: + +1. Answer Diversity: The fraction of unique guesses $\hat{y}$ among reasoning traces. +2. Operation Diversity: The fraction of unique sequence of arithmetic operations performed among reasoning traces (In GSM8k, each intermediate step consists of a basic arithmetic operation, e.g. $5 + 3 = 8$ ). +3. 
Semantic Diversity: The average cosine similarity between the text embeddings of the reasoning traces, computed using Stella-400M-v5 (Zhang et al., 2024a) + +![](images/97906774fe3390f2c8dce9d365178b77721d1991265e137bf86ad95237738532.jpg) +Figure 4: Diversity Collapse The answer, semantic, and operation diversity of Gemma-2-2B reasoning traces across GSM8k test examples. Colors map to different SFT checkpoints. + +![](images/c09374f5d22f90dca1bd1db2225d9246275d5d6c3a44de40384a8f4f0172de11.jpg) + +![](images/6b301ba7c226d983f2678b214edd7278f1cfb4320bcae13b40fbe1456b86da77.jpg) +Diversity Across SFT $[T = 0.8]$ + +![](images/0ca043e2ceef566089e4e9de31db5711daf3366c9e45fb0dd7cea3659355b3d9.jpg) + +![](images/33b82e197b5263aaec61d52cf001bea59054b3e26d2244a7f955f99bbd538652.jpg) +Figure 5: Pass@k for SFT and RL of Qwen-2.5-0.5B on GSM8K. The purple solid line measures Pass@K across SFT steps, while the dashed lines correspond to further training different checkpoints by Proximal Policy Optimization (PPO). While Pass@1 continues to improve, Pass@k for larger K can decrease even with RL. + +As shown in Figure 4, we observe a stark trend where longer SFT on Gemma-2-2B incrementally suffers from clear diversity collapse across all diversity metrics. Specifically, the model places most of its probability mass not only on one particular guess, but on a single reasoning trace, as evidenced by the reduced semantic and operation diversity. + +# 5.1 Theoretical Discussion of Diversity Collapse During SFT and RL + +We assess theoretically why diversity collapse tends to arise during SFT and RL training. Our analysis reveals that while SFT and RL operate on different principles, they share common pathways that lead to reduced generation diversity when optimizing for accuracy. + +Diversity Collapse during SFT Overparameterized models are well-known to exhibit overconfidence in their predictions, an effect that has been studied extensively in classification (Guo et al., 2017). 
In particular, the model's confidence towards the most likely class $P(\hat{y} = k_{\max} \mid x)$ is often much higher than the model's accuracy. In binary classification with linear models $f(x) = \sigma(\langle \boldsymbol{w}, \boldsymbol{x} \rangle)$ and linearly separable training data, gradient descent provably drives the norm of the weights to infinity, causing probabilities to collapse to 0 or 1 (Soudry et al., 2018). We demonstrate this in linear models in Appendix A. A similar phenomenon likely arises in large reasoning models, which may also be prone to overfitting during SFT, ultimately leading to overly confident solutions in spite of limited coverage over the space of traces (Cobbe et al., 2021). + +Diversity Collapse during RL We further prove why applying reinforcement learning to a low-diversity policy yields suboptimal results—and sometimes even exacerbates diversity collapse—in a discrete bandit setting (see Figure 5). In this scenario, we assume there exist $K$ equally good arms, corresponding to a set of successful strategies, and one bad arm that the policy should learn to avoid. We show two key results in this setting: + +1. Implicit Collapse of Policy Diversity without KL Regularization. Our analysis demonstrates that when standard reinforcement learning algorithms—REINFORCE and GRPO—are applied without KL regularization, the training dynamics inevitably lead to a collapse in output diversity. Although multiple arms (actions) are equally optimal, the updates become self-enforcing as training progresses. Once one of the good arms is randomly reinforced, its probability increases at the expense of the others, ultimately driving the policy to converge on a single-arm strategy (Theorem C.1). +2. Diversity Does Not Increase with KL Regularization. When KL regularization is incorporated to constrain the divergence from the initial policy in REINFORCE, the final policy no longer collapses into a single-arm strategy. 
However, the diversity of the converged policy cannot exceed the initial diversity. Concretely, we show that the probability distribution over the good arms remains proportional to the initial distribution when the RL algorithm converges (Theorem C.8). This explains why initializing with a diverse policy is critical for the generalization of reinforcement learning. + + # 6 Bias-Variance Tradeoff of Pass@K + + So far, we saw a mismatch in growth of Pass@1 and Pass@K during SFT and alluded to the impact of diversity collapse on Pass@K. We now formalize the relationship between Pass@1, Pass@K, and diversity collapse. Notably, we show that the upper bound of expected Pass@K over the test distribution can be decomposed into bias and variance quantities. + + # 6.1 Diversity Collapse leads to Bimodal Pass@1 Distribution + + Consider the expected $\mathrm{Pass}@\mathrm{K}$ over the entire test distribution $x, y \sim \mathcal{D}$ . By Jensen's inequality, we can derive a straightforward upper bound of expected $\mathrm{Pass}@\mathrm{K}$ that decomposes into the bias and variance of $1 - \rho_x$ (See proof in Appendix B). Note that the upper bound falls monotonically with larger bias and variance: + + $$ + \textbf{Proposition 6.1.} \quad \mathbb{E}_{x, y \sim \mathcal{D}}[\operatorname{Pass@K}(x)] \leq 1 - \Big( \big( \underbrace{\mathbb{E}_{x, y \sim \mathcal{D}}[1 - \rho_x]}_{\text{Bias}} \big)^{2} + \underbrace{\operatorname{Var}(\rho_x)}_{\text{Variance}} \Big)^{k/2} + $$ + + In Figure 6b, we plot the distribution of error $1 - \rho_{x}$ , estimated using 100 sampled traces, over GSM8K test examples. We notice two trends with longer SFT. First, bias decreases, i.e., the expected error shifts towards 0. However, the distribution becomes increasingly bimodal with the densities converging towards the two extremes 0 and 1. As a result, the variance increases with longer SFT.
This increase in variance directly explains the drop in Pass@k. + +The bimodality of the $1 - \rho_{x}$ distribution means that the Pass@1 of any test problem is either very high or very low. Interestingly, one explanation for the increased bimodality of the distribution of $1 - \rho_{x}$ is in fact when models suffer from diversity collapse. In other words, a particular guess to be oversampled for each test problem. If the model places high probability on an incorrect guess, Pass@1 is very low. On the other hand, if the model places high probability on the correct guess, Pass@1 is very high. We illustrate this relationship in Figure 6a. All in all, Pass@K can be improved in two ways - either reduce bias by improving Pass@1 or reduce variance by increasing diversity. + +![](images/105ec9b2f770f3fd0623182efd6b919cba9dbb10c81c24b7a52fedf2e526985a.jpg) +(a) + +![](images/401631f09a461d1e42f689302a470ca43729d0d0654d63edcc98eccaf75cd6ba.jpg) +(b) +Figure 6: Histogram of error $1 - \rho_{x}$ of Gemma-2-2B SFT checkpoints across GSM8k test. SFT progressively decreases bias but increases variance of error i.e., $1 - \mathrm{Pass}@\mathrm{l}$ , across the test distribution, causing Pass@K to fall. Applying Wise-FT reduces both bias and variance, but temperature scaling trades off decreasing variance with increased bias. + +# 6.2 WiSE-FT vs. Diverse Decoding + +While we've proposed inducing diversity by WiSE-FT, another common alternative for inducing diversity is temperature scaling the logits. High temperature smoothens the logits allowing the model to more likely sample low probability tokens. In Figure 1, we see that while high temperatures indeed improve Pass@K, the Pass@K at any SFT timestep notably never reaches the Pass@K of our final WiSE-FT model. If temperature scaling also increases diversity, why does WiSE-FT strictly outperform sampling with high temperature? 
In Figure 6b, we plot the distribution of $1 - \rho_{x}$ if we sample from the last SFT checkpoint with high temperature $T = 1.5$ . As expected, we see that the model reasons more diversely. This smoothens the bimodal peaks and reduces the variance. However, the average accuracy of the model generations also degrades, causing the bias goes back up. We suspect bias-variance tradeoff is inherent in diversity-inducing decoding approaches. For example, min-p (Nguyen et al., 2024) combines temperature scaling with adaptive thresholding to not sample outlier tokens. However, this additional control is unable to reduce bias (Figure 16). Surprisingly, WiSE-FT uniquely manages to reduce both bias and variance. + +# 7 Discussion + +In this work, we investigated the phenomenon of diversity collapse during the training of reasoning models. Our analysis reveals that standard SFT and RL pipelines can deteriorate in Pass@ $K$ due to the convergence of model generations toward a single reasoning trace. We demonstrated that WiSE-FT, which interpolates between early and late SFT checkpoints, significantly improves both Pass@1 and Pass@ $K$ across multiple math datasets and model scales. This is unlike alternative approaches such as temperature scaling or early + +stopping, which face an inherent tradeoff. Furthermore, improving on these metrics corresponded with better adaptation to test-time scaling and RL. But other limitations of WiSE-FT may exist at larger scale, which we leave for future work. + +Overall, our work reveals the importance of maintaining diversity in reasoning models. Current decoding strategies (e.g., min-p, nucleus, and top-k) are still unable to fully extract a model's capabilities. We estimate that a significant gap, of tens of percent, remains compared to the optimal decoding strategy for Pass@K, i.e., top-K sampling over the model's marginal answer distribution $P(\hat{y} \mid x)$ (see Table 1 and Appendix G). 
We encourage future works to address downstream limitations more carefully in earlier stages of the training pipeline. + +
MethodPass@2Pass@4
Nucleus0.570.67
Min-p0.570.67
Top-k0.560.67
Optimal0.760.83
+ +Table 1: Best Pass@k of Gemma on GSM8k across SFT checkpoints + +# 8 Acknowledgements + +We'd like to thank Aviral Kumar, Sean Welleck, Amrith Setlur and Yiding Jiang for insightful discussions about test-time scaling and reinforcement learning. We'd also like to thank Alex Li, Sachin Goyal, and Jacob Springer for their meaningful contribution to our figures and literature review. We gratefully acknowledge support from Apple, Google, Cisco, OpenAI, NSF, Okawa foundation, the AI2050 program at Schmidt Sciences (Grant #G2264481), and Bosch Center for AI. + +# References + +AlphaProof and AlphaGeometry teams. Ai achieves silver-medal standard solving international mathematical olympiad problems, jul 2024. URL https://deepmind.google/discover/blog/ai-solves-imo-problems-at-silver-medal-level/. +Edward Beeching, Lewis Tunstall, and Sasha Rush. Scaling test-time compute with open models, 2024. URL https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute. +Jeff Bilmes. Submodularity in machine learning and artificial intelligence. arXiv preprint arXiv:2202.00132, 2022. +Feng Chen, Allan Raventos, Nan Cheng, Surya Ganguli, and Shaul Druckmann. Rethinking fine-tuning when scaling test-time compute: Limiting confidence improves mathematical reasoning. arXiv preprint arXiv:2502.07154, 2025. +Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Alphamath almost zero: Process supervision without process, 2024. URL https://arxiv.org/abs/2405.03553. +Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for best-of-n sampling in large language models. arXiv preprint arXiv:2412.15287, 2024a. +Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. 
Inference-aware fine-tuning for best-of-n sampling in large language models, 2024b. URL https://arxiv.org/abs/2412.15287. +Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V. Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training, 2025. URL https://arxiv.org/abs/2501.17161. +Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems, 2021. URL https://arxiv.org/abs/2110.14168. + +Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q Weinberger. On calibration of modern neural networks. In International conference on machine learning, pp. 1321-1330. PMLR, 2017. +Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. +Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset, 2021. URL https://arxiv.org/abs/2103.03874. +Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. The curious case of neural text degeneration, 2020. URL https://arxiv.org/abs/1904.09751. +Audrey Huang, Adam Block, Dylan J Foster, Dhruv Rohatgi, Cyril Zhang, Max Simchowitz, Jordan T Ash, and Akshay Krishnamurthy. Self-improvement in language models: The sharpening mechanism. arXiv preprint arXiv:2412.01951, 2024. +Yifei Li, Zeqi Lin, Shizhuo Zhang, Qiang Fu, Bei Chen, Jian-Guang Lou, and Weizhu Chen. Making large language models better reasoners with step-aware verifier, 2023. URL https://arxiv.org/abs/2206.02336. +Ziniu Li, Congliang Chen, Tian Xu, Zeyu Qin, Jiancong Xiao, Zhi-Quan Luo, and Ruoyu Sun. 
Preserving diversity in supervised fine-tuning of large language models. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=NQEe7B7bSw. +Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. arXiv preprint arXiv:2305.20050, 2023. +Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393. +Minh Nguyen, Andrew Baker, Clement Neo, Allen Roush, Andreas Kirsch, and Ravid Shwartz-Ziv. Turning up the heat: Min-p sampling for creative and coherent llm outputs, 2024. URL https://arxiv.org/abs/2407.01082. +Theo X. Olausson, Jeevana Priya Inala, Chenglong Wang, Jianfeng Gao, and Armando Solar-Lezama. Is self-repair a silver bullet for code generation?, 2024. URL https://arxiv.org/abs/2306.09896. +Pier Giuseppe Sessa, Robert Dadashi, Léonard Hussenot, Johan Ferret, Nino Vieillard, Alexandre Ramé, Bobak Shariari, Sarah Perrin, Abe Friesen, Geoffrey Cideron, Sertan Girgin, Piotr Stanczyk, Andrea Michi, Danila Sinopalnikov, Sabela Ramos, Amélie Héliou, Aliaksei Severyn, Matt Hoffman, Nikola Momchev, and Olivier Bachem. Bond: Aligning llms with best-of-n distillation, 2024. URL https://arxiv.org/abs/2407.14622. +Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for llm reasoning, 2024. URL https://arxiv.org/abs/2410.08146. +Louis Shao, Stephan Gouws, Denny Britz, Anna Goldie, Brian Strope, and Ray Kurzweil. Generating high-quality and informative conversation responses with sequence-to-sequence models. arXiv preprint arXiv:1701.03185, 2017. 
+Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314. +Yuda Song, Hanlin Zhang, Carson Eisenach, Sham Kakade, Dean Foster, and Udaya Ghai. Mind the gap: Examining the self-improvement capabilities of large language models. arXiv preprint arXiv:2412.02674, 2024. + +Daniel Soudry, Elad Hoffer, Mor Shpigel Nacson, Suriya Gunasekar, and Nathan Srebro. The implicit bias of gradient descent on separable data. Journal of Machine Learning Research, 19(70):1-57, 2018. +Ashwin K Vijayakumar, Michael Cogswell, Ramprasath R. Selvaraju, Qing Sun, Stefan Lee, David Crandall, and Dhruv Batra. Diverse beam search: Decoding diverse solutions from neural sequence models, 2018. URL https://arxiv.org/abs/1610.02424. +Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models, 2023. URL https://arxiv.org/abs/2203.11171. +Mitchell Wortsman, Gabriel Ilharco, Jong Wook Kim, Mike Li, Simon Kornblith, Rebecca Roelofs, Raphael Gontijo-Lopes, Hannaneh Hajishirzi, Ali Farhadi, Hongseok Namkoong, and Ludwig Schmidt. Robust fine-tuning of zero-shot models, 2022. URL https://arxiv.org/abs/2109.01903. +Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. arXiv preprint arXiv:2408.00724, 2024. +Wei Xiong, Hanning Zhang, Nan Jiang, and Tong Zhang. An implementation of generative prm. https://github.com/RLHFlow/RLHF-Reward-Modeling, 2024. +Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. Demystifying long chain-of-thought reasoning in llms, 2025. URL https://arxiv.org/abs/2502.03373. 
+Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. arXiv preprint arXiv:2309.12284, 2023. +Dun Zhang, Jiacheng Li, Ziyang Zeng, and Fulong Wang. Jasper and stella: distillation of sota embedding models. arXiv preprint arXiv:2412.19048, 2024a. +Yiming Zhang, Avi Schwarzschild, Nicholas Carlini, Zico Kolter, and Daphne Ippolito. Forcing diffuse distributions out of language models, 2024b. URL https://arxiv.org/abs/2404.10859. + +# A SFT in Binary Classification + +Data and Model Setup We train a linear classifier $f(\pmb{x}) = \langle \pmb{w}, \pmb{x} \rangle$ from random initialization over a binary Gaussian mixture distribution: + +$$ +x \mid y \sim \mathcal {N} (y \boldsymbol {\mu}, I ^ {d \times d}) \tag {3} +$$ + +$$ +y \in \{1, - 1 \} \text {u n i f o r m l y} \tag {4} +$$ + +Given a model, we sample predictions, namely $\hat{y} = 1$ with probability $\sigma (\langle \pmb {w},\pmb {x}\rangle) = (1 + \exp (-\langle \pmb {w},\pmb {x}\rangle))^{-1}$ , or $\hat{y} = 0$ . Then, per-example Pass@1 is equal to $\rho_{x} = \sigma (y\cdot \langle \pmb {w},\pmb {x}\rangle)$ . Similarly, the expected Pass@k is equal to $1 - (1 - \rho_{x})^{k}$ . + +In our experiment, we train an overparametrized linear classifier over binary Gaussian data mixture $x \mid y \sim \mathcal{N}(y \cdot \frac{1}{\sqrt{d}} \mathbf{1}, \frac{1}{2} I)$ where $y = \{-1, 1\}$ and $d = 1000$ . We then evaluate $\rho_x$ of 400 test samples. As training progresses, the distribution of $\rho_x$ over the test data becomes bimodal due to the norm of $w$ monotonically increasing once it separates the training examples. Similarly, we observe that this leads to a drop in Pass@k while Pass@1 continues to improve. 
+ +![](images/110fab68254d7edd76626c12dee15bae4c5510f3d1620d88c62cdb6cd3e849b2.jpg) + +![](images/136c97bd309561e7ca54a0ca2069ad3f0b521147a93b614b1fb712de45f0c740.jpg) +Figure 8: Pass@k across Training in Binary Classification + +![](images/235ace59d9f3a9fd4138d33eda84ec30aa842d50403a6325e09b14c22038f792.jpg) + +![](images/bd859f21719d1d07b48f5e32d3cf6033dc8039de333fc86dee2565d05cfa3961.jpg) + +![](images/3b4b375b012d324a664828356e9ada89ab2d796bd0c978efdd3c6ccc0233b487.jpg) +Figure 7: Weight Norm + +![](images/09a06b40fa4ea2f11c53a1fb0b61297195528e6be636a1b05e4aa3162e56bcea.jpg) + +![](images/672c0c412effe3f158ea40afc7de6980e89697915f6ff25df8aa79757cb6e4a2.jpg) +Figure 9: Histogram of $\rho_{x}$ across training steps + +![](images/9314a20b371980d52ecbb3503a73f93a81b6bb2a673d0623309bc41fbb73253e.jpg) + +![](images/525268009e6075c1325653ae3ac1f4d2d550342360d9da06d7710798d251f1d6.jpg) + +![](images/5a57a6efe0c941d086713a9c86373744f480001ca15dc55fec49bde54b75585e.jpg) + +![](images/b1e638e906973e22c65e08bce79f7b716ac0fa8700587808cf2a8fb6a77abce6.jpg) + +# B Expected Pass@k + +Proposition B.1. + +$$ +\mathbb {E} _ {x, y \sim \mathcal {D}} \left[ \mathrm {P a s s @ K} (x) \right] \leq 1 - \left(\left(\mathbb {E} _ {x, y \sim \mathcal {D}} [ 1 - \rho_ {x} ]\right) ^ {2} + \mathrm {V a r} (\rho_ {x})\right) ^ {k / 2} +$$ + +Proof. 
+ +$$ +\begin{array}{l} \mathbb {E} \left[ (1 - \rho_ {x}) ^ {k} \right] \geq \mathbb {E} \left[ (1 - \rho_ {X}) ^ {2} \right] ^ {k / 2} (5) \\ = \left(1 - 2 \mathbb {E} \left[ \rho_ {x} \right] + \mathbb {E} \left[ \rho_ {x} ^ {2} \right]\right) ^ {k / 2} (6) \\ = \left(\left(1 - 2 \mathbb {E} [ \rho_ {x} ] + \mathbb {E} [ \rho_ {x} ] ^ {2}\right) + \left(\mathbb {E} \left[ \rho_ {x} ^ {2} \right] - \mathbb {E} [ \rho_ {x} ] ^ {2}\right)\right) ^ {k / 2} (7) \\ = \left(\left(1 - \mathbb {E} [ \rho_ {x} ]\right) ^ {2} + \operatorname {V a r} (\rho_ {x})\right) ^ {k / 2} (8) \\ \end{array} +$$ + +![](images/d6563642ee50cd803af3a0e79797353c9f1d469062b359c5cc621ad0702e4063.jpg) + +# C RL Theory + +# C.1 Overview + +We will prove that in a discrete bandit setting with $K$ equally good arms that is the best arm, both REINFORCE and GRPO without KL regularization will eventually collapse into a single-arm strategy. + +We will further prove that, with KL regularization with respect to the initial policy, the converged policy of REINFORCE have the same action distribution as the initial policy when constrained on the set of best arms. Therefore, diversity within good actions will not increase through REINFORCE training. + +# C.2 Notations and Setup + +Formally we consider the following setting. We consider a $K + 1$ -armed bandit, with arms $\{1,2,\dots ,K + 1\}$ . Arms $1,\ldots ,K$ are "good," each yielding reward 1, and the other arm is "bad," yielding reward 0. We use a softmax parameterization: + +$$ +p _ {i} = \frac {e ^ {\theta_ {i}}}{\sum_ {j = 1} ^ {K + 1} e ^ {\theta_ {j}}}, \quad i = 1, \dots , K + 1. +$$ + +to denote the action distribution. We will use $\theta_i^{(t)}$ to denote the parameter at step $t$ . + +It is standard to consider using the KL divergence between the current policy with a reference policy (which we set as $p_0$ here) as a regularization term. 
+ +$$ +\mathrm {K L} (p ^ {(t)} | p ^ {(0)}) = \sum_ {i = 1} ^ {K + 1} p _ {i} ^ {(t)} \log \frac {p _ {i} ^ {(t)}}{p _ {i} ^ {(0)}} +$$ + +For REINFORCE, we will consider the following training setup. At step $t$ : + +1. We sample an arm $I_{t}$ according to $p(\cdot) = (p_1^{(t)},\dots ,p_{K + 1}^{(t)})$ and receive reward $r_t$ +2. We update using policy gradient. + +$$ +\theta_ {i} ^ {(t + 1)} = \theta_ {i} ^ {(t)} + \eta r _ {t} \nabla_ {\theta_ {i}} (\log p _ {I _ {t}} ^ {(t)}) - \eta \beta \nabla_ {\theta_ {i}} \mathrm {K L} (p ^ {(t)} | p ^ {(0)}), i = 1, \dots , K + 1, +$$ + +where $\eta > 0$ is the step size and $\beta$ is the hyperparameter controlling the strength of KL regularization. + +For GRPO, we will consider the following simplified training setup. This is equivalent to the empirical version of GRPO with online sampling. + +1. Sample $G$ arms $\{I_t^{(1)},\dots ,I_t^{(G)}\}$ i.i.d. from the current policy $p(\cdot)$ and receive rewards $r_t^{(g)}$ . +2. Compute + +$$ +\mu_ {t} = \frac {1}{G} \sum_ {g = 1} ^ {G} r _ {t} ^ {(g)}, \quad \sigma_ {t} = \sqrt {\frac {1}{G} \sum_ {g = 1} ^ {G} \left(r _ {t} ^ {(g)} - \mu_ {t}\right) ^ {2}}, +$$ + +and define the normalized advantage + +$$ +\begin{array}{r} \tilde {r} _ {t} ^ {(g)} = \left\{ \begin{array}{l l} \frac {r _ {t} ^ {(g)} - \mu_ {t}}{\sigma_ {t}}, & \sigma_ {t} \neq 0, \\ 0, & \sigma_ {t} = 0. \end{array} \right. \end{array} +$$ + +We will skip the update if $\sigma_t = 0$ . + +3. Update each parameter $\theta_{i}$ via + +$$ +\theta_ {i} \gets \theta_ {i} + \frac {\eta}{G} \sum_ {g = 1} ^ {G} \widehat {r} _ {t} ^ {(g)} \nabla_ {\theta_ {i}} (\log p _ {I _ {t} ^ {(g)}} ^ {(t)}) - \eta \beta \nabla_ {\theta_ {i}} \mathrm {K L} (p ^ {(t)} | p ^ {(0)}). i = 1, \ldots , K + 1, +$$ + +# C.3 Implicit Diversity Collapse without KL regularization + +Theorem C.1 (Collapse to Deterministic Policy). 
Under REINFORCE or GRPO updates without KL regularization $(\beta = 0)$, given a sufficiently small $\eta$, with probability 1:
+$$ + +and + +$$ +| \theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)} | < C _ {2} +$$ + +Lemma C.5 shows that for any self-enforcing stochastic policy update rule, the final policy collapses into a single-arm policy. + +Using Lemma C.6 and C.7, we can show that REINFORCE and GRPO are self-enforcing stochastic policy update rules when bad arm probability is lower than $1 / 2$ . The proof is then complete. + +Lemma C.3 (Bad Arm Probability Diminishes Using REINFORCE). Under the REINFORCE algorithm without KL regularization $(\beta = 0)$ , $\lim_{t\to \infty}p_{K + 1}^{(t)} = 0$ almost surely. + +Proof. We can first simplify the REINFORCE update rule to + +$$ +\theta_ {i} ^ {(t + 1)} = \theta_ {i} ^ {(t)} + \eta r _ {t} (\mathbf {1} (I _ {t} = i) - p _ {i} ^ {(t)}), \quad i = 1, \dots , K + 1. +$$ + +Noted that $\sum_{i}\theta_{i}^{(t)}$ will not change with $t$ , WLOG, assume + +$$ +\sum_ {i} \theta_ {i} ^ {(t)} = 0. +$$ + +Because $r_{K + 1} = 0$ , we can then assume without loss of generality, for all $t$ , $I_t \leq K$ . + +This then suggests that + +$$ +\theta_ {K + 1} ^ {(t + 1)} = \theta_ {K + 1} ^ {(t)} - \eta p _ {K + 1} ^ {(t)} +$$ + +monotonically decrease. + +For any $\epsilon$ , if $p_{K + 1}^{(t)} > \epsilon$ holds for infinite $t$ , then there exists $t_0$ , where $\theta_{K + 1}^t < \log \epsilon$ for any $t > t_0$ . For any $t > t_0$ , there exists $i \in [K]$ , such that $\theta_i^{(t)} > 0$ . This then suggests that + +$$ +p _ {K + 1} ^ {(t)} \leq \exp (\theta_ {K + 1} ^ {(t)} - \theta_ {i} ^ {(t)}) \leq \epsilon . +$$ + +This leads to a contradiction. The proof is then complete. + +Lemma C.4 (Bad Arm Probability Diminishes Using GRPO). Under the GRPO algorithm without KL regularization $(\beta = 0), \lim_{t \to \infty} p_{K+1}^{(t)} = 0$ almost surely. + +Proof. For GRPO, we can show that $\tilde{r}_t^{(g)}$ is negative iff $I_t^{(g)} = K + 1$ . Therefore, we can show that $\theta_{K+1}^{(t)}$ monotonically decreases, similar to the case in REINFORCE. 
+ +If $p_{K+1}^{(t)} > \epsilon$ holds for some $t$ , one can prove that $\theta_{K+1}^{(t)}$ will decrease by a constant depending on $\epsilon$ in expectation. Therefore, following the same line as in C.3, we can prove that $\lim_{t \to \infty} p_{K+1}^{(t)} = 0$ almost surely. + +Lemma C.5 (Collapse Happens for All Self-enforcing Stochastic Policy Update Rule). Consider a policy update process that is self-enforcing stochastic (Definition C.2), then $\lim \sup_{t\to \infty}\max_{i\in [K]}p_i^{(t)} = 1$ almost surely. + +Proof. We will inductively prove that for different $K$ the following induction hypotheses, for any $\epsilon, \delta > 0$ , there exists $T_{\epsilon, \delta, K} > 0$ , + +$$ +\Pr \left(\max _ {t < T _ {\epsilon , \delta , K}} \max _ {i \in [ K ]} p _ {i} ^ {(t)} < 1 - \epsilon\right) < \delta . +$$ + +We first consider the case where $K = 2$ . + +Consider the stopping time, + +$$ +\tau_ {\epsilon} = \arg \min _ {t} \max _ {i \in [ K ]} p _ {i} ^ {(t)} > 1 - \epsilon +$$ + +For any $\mathcal{I} = \{1,2\}$ , define $\Delta_{\mathcal{I}}^{t} = \max_{j\in [K]}\theta_{j}^{t} - \min_{j\in \mathcal{I}}\theta_{i}^{t}$ . + +Assume $\theta_{i*}^t = \max_{j\in [K]}\theta_j^t$ , because $|\mathcal{I}|\geq 2$ , there exists $i\neq i^{*}$ , $\min_{j\in \mathcal{I}}\theta_i^t >0$ . We will show three properties of $\Delta_I^t$ + +First $\Delta_{\mathcal{I}}^{(t)}$ is a submartingale defined on the filtration of the distribution of $\theta^{(t)}$ because + +$$ +\mathbb {E} [ \Delta_ {\mathcal {I}} ^ {(t)} | \theta_ {t} ] - \Delta_ {\mathcal {I}} ^ {(t - 1)} > \mathbb {E} [ (\theta_ {i ^ {*}} ^ {t + 1} - \theta_ {i ^ {*}} ^ {t}) - (\theta_ {i} ^ {t + 1} - \theta_ {i} ^ {t}) | \theta_ {t} ] > 0. +$$ + +as the policy is self-enforcing. + +Further $\Delta_{\mathcal{I}}^{(t)}$ has bounded growth of $2C_2$ as + +$$ +\begin{array}{l} | \max _ {j \in [ K ]} \theta_ {j} ^ {t + 1} - \max _ {j \in [ K ]} \theta_ {j} ^ {t} | < C _ {2}. 
\\ \bigl|\min_{j\in \mathcal{I}}\theta_{j}^{t + 1} - \max_{j\in \mathcal{I}}\theta_{j}^{t}\bigr| < C_{2}. \\ \end{array} +$$ + +Furthermore, the second-momentum of $\Delta_{\mathcal{I}}^{(t)}$ needs to increase with $t$ by a constant for any $t < \tau_{\epsilon}$ . + +$$ +\begin{array}{l} \mathbb {E} \left[ \left(\Delta_ {\mathcal {I}} ^ {(t + 1)}\right) ^ {2} \mid \theta_ {t} \right] \geq \left(\Delta_ {\mathcal {I}} ^ {(t)}\right) ^ {2} + \mathbb {E} \left[ \left(\Delta_ {\mathcal {I}} ^ {(t + 1)} - \Delta_ {\mathcal {I}} ^ {(t)}\right)\right) ^ {2} \mid \theta_ {t} ] \\ \geq \left(\Delta_ {I} ^ {(t)}\right) ^ {2} + C _ {1} \epsilon^ {2}. \\ \end{array} +$$ + +When $t < \tau_{\epsilon}$ , it holds that $\Delta_{\mathcal{I}}^{(t)} < \log \frac{2}{\epsilon}$ , otherwise we can prove that + +$$ +\max _ {i, j \in \{1, 2 \}} p _ {i} / p _ {j} = \exp (\Delta_ {\mathcal {I}} ^ {(t)}) > \frac {2 - 2 \epsilon}{\epsilon}. \Rightarrow \max _ {i \in \{1, 2 \}} p _ {i} > 1 - \epsilon . +$$ + +This is a contradiction. Further, by Martingale inequality, we have that + +$$ +\mathbb {E} [ \left(\Delta^ {\min \{t, \tau_ {\epsilon} \}}\right) ^ {2} ] > \mathbb {E} [ \left(\Delta^ {0}\right) ^ {2} ] + C _ {1} \epsilon^ {2} \mathbb {E} [ \min \{t, \tau_ {\epsilon} \} ] +$$ + +Further, as $\Delta^t$ has bounded growth, we have that + +$$ +\mathbb {E} \left[ \left(\Delta^ {\min \{t, \tau_ {\epsilon} \}}\right) ^ {2} \right] < (\log \frac {2}{\epsilon} + 2 C _ {2}) ^ {2}. +$$ + +This implies $\mathbb{E}[\min \{t,\tau_{\epsilon}\}] < \frac{(\log\frac{2}{\epsilon} + 2C_2)^2}{C_1\epsilon^2}$ for all $t$ , this implies + +$$ +\mathbb {E} [ \tau_ {\epsilon} ] < \frac {(\log \frac {2}{\epsilon} + 2 C _ {2}) ^ {2}}{C _ {1} \epsilon^ {2}}. +$$ + +Further, by Markov inequality, if we choose + +$$ +T _ {\epsilon , \delta , 2} = \frac {(\log \frac {2}{\epsilon} + 2 C _ {2}) ^ {2}}{C _ {1} \epsilon^ {2} \delta}. 
+$$ + +then, + +$$ +\Pr \left(\tau_ {\epsilon} > T _ {\epsilon , \delta , 2}\right) < \frac {\mathbb {E} \left[ \tau_ {\epsilon} \right]}{T _ {\epsilon , \delta , 2}} < \delta . +$$ + +This concludes the proof for $K = 2$ . + +Now assuming the result holds for $K - 1$ and consider the case for $K$ , First, we choose a small enough constant $C_{\delta ,\epsilon ,K,N} > 0$ , such that when $p_{K - 1}^{(0)} < C_{\delta ,\epsilon ,K,N}$ , the following two random processes are close: + +- Running the algorithm for $N$ steps on the $K$ arms bandit yields $\theta_i^{(t)}, i \in [K]$ +- Running the algorithm for $N$ steps on a $K - 1$ arms bandit yields $\tilde{\theta}_i^{(t)}, i \in [K - 1]$ with $\tilde{\theta}_i^{(0)} = \theta_i^{(0)}, i < K - 1$ and $\tilde{\theta}_{K - 1}^{(0)} = \theta_K(0)$ + +and there exists a joint measure on $\theta$ and $\tilde{\theta}$ such that + +$$ +\forall i \in [ K - 2 ], t < N, \Pr (| p _ {i} ^ {t} - \tilde {p} _ {i} ^ {t} | > \epsilon / 2) < \delta / 6. +$$ + +$$ +\operatorname * {P r} (| p _ {K} ^ {t} - \tilde {p} _ {K - 1} ^ {t} | > \epsilon / 2) < \delta / 6. +$$ + +$$ +\Pr \left(\left| p _ {K} ^ {t} - p _ {K} ^ {0} \right| > \epsilon / 2\right) < \delta / 6. +$$ + +This joint measure is constructed by choosing the corresponding arm for two process at each sampling step as long as the sampled arm is not $K$ and uses the uniform convergence on $\nabla \log_{\theta} p_i$ . Now following the same argument at $K = 2$ , we can show that there exists $\tilde{T}_{\epsilon, \delta, K}$ such that + +$$ +\operatorname * {P r} (\exists t < \tilde {T} _ {\epsilon , \delta , K}, \min _ {t \in [ K ]} p _ {t} < C _ {\delta , \epsilon , K, T _ {\epsilon / 2, \delta / 2, K - 1}}) > 1 - \delta / 2. 
+$$ + +Then we can invoke the induction hypothesis and uses the coupling shown above to show that if we choose $T_{\epsilon, \delta, K} = \tilde{T}_{\epsilon, \delta, K} + T_{\epsilon/2, \delta/2, K-1}$ , then there exists a time step that one arm has probability higher than $1 - \epsilon$ with probability at least $1 - \delta$ . + +![](images/ca1b539d95c5fb460f2be006fa7134f3f1ba977fdaf654bf1420e28cac93d5b2.jpg) + +Lemma C.6. The REINFORCE algorithm without KL regularization ( $\beta = 0$ ) is self-enforcing stochastic (Definition C.2) once $p_{K+1}^{(t)} < 1/2$ . + +Proof. The REINFORCE algorithm is self-enforcing because + +$$ +\mathbb {E} [ \theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)} ] = \eta p _ {i} (r _ {i} - \sum_ {j \in [ K + 1 ]} p _ {j} r _ {j}). +$$ + +Further, + +$$ +| \theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)} | \leq 1 +$$ + +and if we consider the distribution of $\Delta_{i,i^*,t} = \frac{\left(\theta_i^{(t + 1)} - \theta_i^{(t)}\right) - \left(\theta_{i^*}^{(t + 1)} - \theta_{i^*}^{(t)}\right)}{\eta}$ , it holds that + +$$ +\Delta_ {i, i ^ {*}, t} = r _ {I _ {t}} \left(\mathbf {1} (i = I _ {t}) - \mathbf {1} (i ^ {*} = I _ {t}) - p _ {i} + p _ {i ^ {*}}\right) +$$ + +$$ +\Pr \left(\Delta_ {i, i ^ {*}, t} = - 1 - p _ {i} + p _ {i} ^ {*}\right) \geq \Pr \left(I _ {t} = i ^ {*}\right) = p _ {i ^ {*}} +$$ + +Therefore + +$$ +\begin{array}{l} \mathbb {E} \left[ \Delta_ {i, i ^ {*}, t} ^ {2} \right] \geq p _ {i ^ {*}} \left(- 1 - p _ {i} + p _ {i} ^ {*}\right) ^ {2} \\ \geq p _ {i ^ {*}} (1 - p _ {i ^ {*}}) ^ {2} \geq \frac {\epsilon^ {2}}{2 K}. \\ \end{array} +$$ + +This then concludes the proof with $C_1 = \eta / 2K$ and $C_2 = \eta$ . + +Lemma C.7. The GRPO algorithm without KL regularization ( $\beta = 0$ ) is self-enforcing stochastic (Definition C.2) once $p_{K+1}^{(t)} < 1/2$ . + +Proof. 
The GRPO algorithm is self-enforcing because + +$$ +\mathbb {E} [ \theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)} ] = \eta \mathbb {E} [ \tilde {r} _ {t} ^ {(g)} (\mathbf {1} (I _ {t} ^ {(g)} = i) - p _ {i} ^ {(t)}) ] = \eta \mathbb {E} [ \tilde {r} _ {t} ^ {(g)} \mathbf {1} (I _ {t} ^ {(g)} = i) ] = \eta \mathbb {E} _ {\mu_ {t}} [ \mathbb {E} [ \tilde {r} _ {t} ^ {(g)} \mathbf {1} (I _ {t} ^ {(g)} = i) | \mu_ {t} ] ]. +$$ + +Noted that $\mathbb{E}[\tilde{r}_t^{(g)}\mathbf{1}(I_t^{(g)} = i)|\mu_t]$ is monotonous with $p_i$ , hence monotonous with $\theta_{i}$ . + +Further + +$$ +\begin{array}{l} | \theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)} | \leq \eta \max _ {g} | \tilde {r} _ {t} ^ {(g)} (\mathbf {1} (I _ {t} ^ {(g)} = i) - p _ {i} ^ {(t)}) | \\ \leq \eta \max _ {g} | \tilde {r} _ {t} ^ {(g)} | \leq \eta \sqrt {G}. \\ \end{array} +$$ + +Now we only need to lower bound the second momentum of + +$$ +\Delta_ {i, i ^ {*}, t} = \frac {\left(\theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)}\right) - \left(\theta_ {i ^ {*}} ^ {(t + 1)} - \theta_ {i ^ {*}} ^ {(t)}\right)}{\eta} +$$ + +Noted that + +$$ +\theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)} = \frac {\eta}{G} \sum_ {g = 1} ^ {G} \tilde {r} _ {t} ^ {(g)} \mathbf {1} (I _ {t} ^ {(g)} = i). +$$ + +It holds that + +$$ +\sigma_ {t} = \sqrt {\frac {1}{G} \sum_ {g} (r _ {t} ^ {g} - \mu) ^ {2}} = \sqrt {\frac {1}{G} \sum_ {g} r _ {t} ^ {g} - 2 \mu r _ {t} ^ {g} + \mu^ {2}} = \sqrt {\mu - \mu^ {2}}. +$$ + +Therefore when $r_t^{(g)} > 0$ + +$$ +\tilde {r} _ {t} ^ {(g)} = \frac {r _ {t} ^ {(g)} - \mu_ {t}}{\sigma_ {t}} = \frac {1 - \mu_ {t}}{\sigma_ {t}} = \sqrt {\frac {1 - \mu_ {t}}{\mu_ {t}}} \geq \sqrt {\frac {1}{G - 1}}. 
+$$ + +Because all $\tilde{r}_t^{(g)}$ are the same when $r_t^{(g)} > 0$ , it holds that when $i \in [K]$ + +$$ +\begin{array}{l} \Delta_ {i, i ^ {*}, t} ^ {2} = \frac {1}{G} \frac {1 - \mu_ {t}}{\mu_ {t}} \left(\sum_ {g = 1} ^ {G} {\bf 1} (I _ {t} ^ {(g)} = i) - {\bf 1} (I _ {t} ^ {(g)} = i ^ {*})\right) ^ {2} \\ \geq \frac {1}{G (G - 1)} \left(\sum_ {g = 1} ^ {G} \mathbf {1} \left(I _ {t} ^ {(g)} = i\right) - \mathbf {1} \left(I _ {t} ^ {(g)} = i ^ {*}\right)\right) ^ {2}. \\ \end{array} +$$ + +This then implies + +$$ +\mathbb {E} [ \Delta_ {i, i ^ {*}, t} ^ {2} ] \geq \frac {1}{G (G - 1)} \mathbb {E} \left[ \left(\sum_ {g = 1} ^ {G} {\bf 1} (I _ {t} ^ {(g)} = i) - {\bf 1} (I _ {t} ^ {(g)} = i ^ {*})\right) ^ {2} \Big | \mu_ {t} \neq 1, 0 \right] +$$ + +One can without loss of generality assume $I_{t}^{(G)} = K + 1$ and show that + +$$ +\begin{array}{l} \mathbb {E} \left[ \Delta_ {i, i ^ {*}, t} ^ {2} \right] \geq \frac {1}{G (G - 1)} \mathbb {E} \left[ \left(\sum_ {g = 1} ^ {G - 1} \mathbf {1} \left(I _ {t} ^ {(g)} = i\right) - \mathbf {1} \left(I _ {t} ^ {(g)} = i ^ {*}\right)\right) ^ {2} \right] \\ \geq \frac {1}{G} \mathbb {E} \left[ \left(\mathbf {1} \left(I _ {t} ^ {(1)} = i\right) - \mathbf {1} \left(I _ {t} ^ {(1)} = i ^ {*}\right)\right) ^ {2} \right] = \frac {p _ {i} + p _ {i} ^ {*}}{G} \geq \frac {1}{2 K G}. \\ \end{array} +$$ + +When $i \neq K$ , noted that $\left(\theta_{i}^{(t+1)} - \theta_{i}^{(t)}\right) - \left(\theta_{i^{*}}^{(t+1)} - \theta_{i^{*}}^{(t)}\right) > \left(\theta_{i}^{(t+1)} - \theta_{i}^{(t)}\right) > 0$ . Therefore, a similar bound can show that $\mathbb{E}[\Delta_{i,i^{*},t}^{2}] > \frac{1}{2KG}$ . This then concludes the proof with $C_{1} = \eta / 2KG$ and $C_{2} = \sqrt{G}$ . + +□ + +# C.4 Diversity Never Improves with KL regularization + +Theorem C.8 (Diversity Preservation under KL Regularization). 
Let $p_0$ be the initial policy and let $\beta > 0$ be the KL regularization hyperparameter. If the REINFORCE process converges to a policy $p^*$ , then $p^*$ satisfies:

$$
\frac {p ^ {*} (i)}{\sum_ {j = 1} ^ {K} p ^ {*} (j)} = \frac {p _ {0} (i)}{\sum_ {j = 1} ^ {K} p _ {0} (j)} \quad \forall i \in \{1, \dots , K \}.
$$

Consequently, the distribution over the optimal arms under $p^*$ matches the initial distribution $p_0$ restricted to these arms and renormalized.

Proof. Using the policy gradient theorem, we know that the converged policy $p^*$ and corresponding parameter $\theta^*$ satisfy

$$
\nabla_ {\theta} \left[ \sum_ {i = 1} ^ {K + 1} r _ {i} p _ {i} + \beta \mathrm {K L} \left(p | p ^ {0}\right) \right] \Bigg | _ {\theta = \theta^ {*}} = 0
$$

This then suggests that for any $k$

$$
r _ {k} p _ {k} ^ {*} - p _ {k} ^ {*} \sum_ {i = 1} ^ {K + 1} r _ {i} p _ {i} ^ {*} + \beta \sum_ {i = 1} ^ {K + 1} \nabla_ {\theta_ {k}} [ p _ {i} \log p _ {i} - p _ {i} \log p _ {i} ^ {0} ] = 0
$$

This is equivalent to

$$
r _ {k} p _ {k} ^ {*} - p _ {k} ^ {*} \sum_ {i = 1} ^ {K + 1} r _ {i} p _ {i} ^ {*} + \beta \sum_ {i = 1} ^ {K + 1} (\mathbf {1} (i = k) - p _ {k} ^ {*}) p _ {i} ^ {*} (\log p _ {i} ^ {*} + 1 - \log p _ {i} ^ {0}) = 0
$$

Simplifying

$$
r _ {k} + \beta (\log p _ {k} ^ {*} + 1 - \log p _ {k} ^ {0}) = \sum_ {i = 1} ^ {K + 1} r _ {i} p _ {i} ^ {*} + \beta \sum_ {i = 1} ^ {K + 1} p _ {i} ^ {*} (\log p _ {i} ^ {*} + 1 - \log p _ {i} ^ {0})
$$

For all $k \in [K]$ , we know that $r_k$ is the same; therefore $\log p_k^* - \log p_k^0$ does not depend on $k$ , i.e., $\frac{p^*(k)}{p_0(k)}$ is a constant for $k \in [K]$ , concluding our proof.

# C.5 Technical Lemma

Lemma C.9. For $x\in \mathbb{R}$ with $|x| < C$ , it holds that

$$
\exp (x) \geq 1 + x + A _ {C} x ^ {2}
$$

where $A_{C} = \frac{\exp(-C) + C - 1}{C^{2}}$

Proof. Define $g(x) = \frac{\exp(x) - 1 - x}{x^2}$ ; this function is monotonically increasing, hence $g(x) \geq g(-C) = A_C$ for all $|x| < C$ , which proves the claim. 

# D Open-Thoughts Evaluation

We finetune Qwen2.5-7B-Instruct over OpenThoughts-114k for 5 epochs using BF16 and AdamW and hyperparameters lr=1e-5, bs=128, warmup=150 steps. We sample 40 reasoning traces with temperature set to 0.7 for each of the 30 problems in AIME24. Then we evaluate the following quantities.

![](images/73d34b55c39b755f68e6950c8eebdf29d21cb222617fda1fe97a55a0270a9208.jpg)
Competition Math (AIME24)

![](images/5aa59d6e2aba258b1051ce25b0904b17cd2e7490d5e852f24f61c5460a902111.jpg)

![](images/b920292dcecb0dc0a1bc2d82f9a4af452fedf7363369eff449048c35d2f2d1b2.jpg)
Figure 10: Pass@K Evaluated on AIME24 over OpenThoughts-114K SFT checkpoints. We plot the expected Pass@K ± SD. Note that improvements in Pass@K slow down while Pass@1 improves at a constant rate. Furthermore, the confidence interval of Pass@1 widens, meaning the variance increases during SFT.

![](images/5667e64bdc969d25573bf150b13c2046ca2168b091045a68f3bfe49c56c2ea9e.jpg)
Figure 11: Histogram of Pass@1 over AIME24. Variance of Pass@1 increases over finetuning on OpenThoughts-114K. We note that since AIME24 only has 30 questions, the density plot may not be completely reliable.

![](images/ce0b365e2d0cfdbc6033585c43605d8fc9a10bbe002ac2b4022da555660ffbd8.jpg)

![](images/a68455b7785772599da90654e44c982cadd4ddd78f610dab8af5816a70e0c43c.jpg)

![](images/25ebeb290d7259127374c9c05e2e0ce5075d7b65dfef8be8699ed49268e3b01a.jpg)

![](images/3eb53d8c924bb92658ec5635c7b2ae63fb3273756bce235deb3ec5596f40bd57.jpg)

![](images/bb1d37404a819e2ad64f45cefe623549253879023f3bf030ffe2894bde9a30af.jpg)
Figure 12: We plot the average number of unique answers sampled over the total number of samples i.e. $\left|\left\{y_{i}\right\}_{i=1}^{n}\right| / n$ . The model samples a less diverse set of answers as SFT progresses. 
+ +# E Interpolation Coefficients + +![](images/408e81cdfc67395009c30c39aad9c4a31fa71dc028099dd11d2d322e044cc302.jpg) + +![](images/5cc7d3e6e8dc1b3da5bcfeaac5e13c685f56aae0ab567a53aa222a84743494fd.jpg) +WiSE-Step672 on MATH500 + +![](images/b592a7e7ccb7f34cab0c0fe80f8b46299a170f5e015417c6a260678ca37e45d5.jpg) +WiSE-Step672 on MATH500 +WiSE-Step672 on MATH500 + +![](images/e15bf9634190a320923c26e9e93036be5f32c027f6a9256280b04a259db1412f.jpg) +WiSE-Step672 on MATH500 + +![](images/a8fef11dae9deb43d79b665e01086d9767e0568d82dec02ac4133daecf6e053a.jpg) +WiSE-Step896 on MATH500 + +![](images/b5a0036c7749a83fbea586e2ad026ccfe84a4e95cb087bdcfd3c7171f8cbc3f2.jpg) +WiSE-Step896 on MATH500 + +![](images/e0ef05e879f2cabc6cc375cd5dfc6c14399c3e8c637b35a2bdd2dcd0382c64de.jpg) +WiSE-Step896 on MATH500 + +![](images/cd383a929ba7e9a9865537886d40e7be10298f21f2a4eee59bb64f1c03895e08.jpg) +WiSE-Step896 on MATH500 + +![](images/3d13f320227abed29d629f8ace04823e9059603f82a24fb7c3c0d25ac51e5eb4.jpg) +WiSE-Step1120 on MATH500 +Figure 13: Pass@1 versus Pass@K of WiSEFT of Qwen-2.5-0.5B trained and evaluated on MATH500. We interpolate between model $\pmb{w}_0$ at Step 112 with $\pmb{w}_t$ for $t\in [672,896,1120]$ as $\delta \pmb{w}_0 + (1 - \delta)\pmb{w}_t$ where $\delta \in [0.1,0.9]$ . 
+ +![](images/d05652d4c9f3cc0229b825169a87f0a25576d63a99afa799f8969368cab3b996.jpg) +WiSE-Step1120 on MATH500 + +![](images/a79e505ca270a42946351e05193fde44a40c51342de249abe1125406caef19ef.jpg) +WiSE-Step1120 on MATH500 + +![](images/4970e2bbeb01a58d65034f7eee2d971620f82a124a24f05a15a0868fd8060784.jpg) +WiSE-Step1120 on MATH500 + +![](images/6415bf26abc61b3f3704f1e93f285ad0c1d640bbb79a1978e63f250e5f01c217.jpg) + +![](images/cee9d9f66f1ba05e20958f6280cd378d5414554e40cbe63513148f4d200be612.jpg) + +![](images/7e11ff93e0b785d9953e3d89bcd12390815907a6dc2f3d35716009e51d01b2a9.jpg) + +![](images/e2a337f3f3b316e21bd15dad207ede3c32034de3208f9ee58d3e8e8a316f2a94.jpg) + +![](images/c5f007038e4a8539395358cfc078786e75a2500ccebd39ea8dcc0d9f3edd38c9.jpg) + +![](images/8ab320b8b9b148e7007304b25bcea6941eee571cdcfb9f2bcb270cc915f41d2e.jpg) + +![](images/534a1915e7237bbe8eb59cd3e2becaa163ccc90f8b40a09cb87b02ff2f834f83.jpg) + +![](images/88a2aea4db6f0ceb07a3f515c45521b42aef420591197fc4ac31c9e750a6eb8c.jpg) + +![](images/d98f4a0d11c754910a2b645db67fdc99936a484cb60dcd83a0344a2437fc161b.jpg) + +![](images/5186f02cf14107fb5f45cd06a48b8a84b8d148a57fc1f02fb24e57f2de6f0b2c.jpg) + +![](images/6879e45cb82d6e4e3cb71ac588c9076b39549c4f56feda977179787fe237fef8.jpg) + +![](images/7c463a6b643c401de38db5cb6513d86e528960fd75d924839fc2a365a4fe7d82.jpg) + +![](images/210f985ab8344ab9024b0e8866757143d08c6a402d3b38801bfe06bad03d0471.jpg) + +![](images/97b56b7e63b48c137ae6e876983d6e3b63f26d408263648812f7955049209453.jpg) + +![](images/6583cb61c58420a22e2b481d3ecd6ab5badcb2086aeae20b3945609becb32131.jpg) + +![](images/76cd3796b40caac6ff8a02aec6cb5d704946a539355c961c1da0cde04fcc0f3b.jpg) + +![](images/9b0b5f9803b94a0b5f4b2b333482b5acab14a7b4396c642f98d4c63f944bbd16.jpg) +Figure 14: Pass@1 versus Pass@K of WiSEFT of Gemma-2-2B trained and evaluated on GSM8K. 
We interpolate between model $\pmb{w}_0$ at Step 171 with $\pmb{w}_t$ for $t \in [342, 684, 1026, 1368, 1710]$ as $\delta \pmb{w}_0 + (1 - \delta) \pmb{w}_t$ where $\delta \in [0.05, 0.9]$ .

![](images/4c43efb60dc7fd63fa7e0bf6fe8153abec532160cb2af6bb071e080da392c5f2.jpg)

![](images/ad7003e3f0b25d46193c9fffc98906a772a124775a56cd8b98a55940824b50a2.jpg)

![](images/b1061168966769f0a706a6cdefd0ca7df762aaf6f5cf2e53465bfdddcf6d261d.jpg)

# F Measuring Diversity of Traces

We measure the diversity of the 100 sampled traces of Gemma-2-2B across GSM8k test. We measure diversity in terms of three different measures.

Output Diversity The cardinality or number of unique answers in the set of all model outputs $\left|\{\hat{y}_1,\hat{y}_2,\dots ,\hat{y}_n\}\right|$ over the total number of traces.

Operation Diversity In GSM8k, each intermediate step consists of basic arithmetic operations, e.g. $5 + 3 = 8$ . We may simply map each of the traces to the sequence of arithmetic operations the model steps through, i.e. $r_i \rightarrow [o_1, o_2, \ldots, o_t]$ . This mapping is extracted by code. Then, given this set, we measure the number of unique operation sequences over the total number of traces.

Semantic Diversity We measure the similarity of traces using cosine similarities between their text embeddings (Bilmes, 2022; Yu et al., 2023).

# F.1 Does temperature increase diversity?

Temperature does increase diversity, but it also increases the chances of sampling outlier answers. 
+ +![](images/e5ccfdef5708eae2cdd50fb2c0053f33997475088945e324b1799671240c70ec.jpg) + +![](images/6505c814dcb35aa9757d8be7050bd89319b4a0aae6db8d965dfe1bf81985e105.jpg) +Diversity Across SFT [T=1.0] + +![](images/9edcca763df460a84257bc718bc153f2f8f5a4c6a79bbf78ad4b859738b6d86e.jpg) +Diversity Across SFT [T=0.8] + +![](images/6357c2ac829597a5cabbd3f6f2c13751aeaac97b2934f7e1144f19fda33b7246.jpg) + +![](images/7ad9bd5e57c1c23dbd6a2fe73d568d72272de9cf3ff807b8a8da06b7e3ec8421.jpg) + +![](images/c03ad321d119e163d7d652bd75fdd1be669582b2e192edd3f0c824e236fa1553.jpg) +Diversity Across SFT [T=1.5] + +![](images/946d993abc5cb2f9296711897d897572593e58f08517be74488fe99906faa457.jpg) + +![](images/4cca06e5c218704714c26e86a6ac995e438bcee795019154e74edbfe68091d0f.jpg) + +![](images/6c18e77dd874f11856943ca580d37e31d9687c5a7be4bc53d7a94208e6f4079a.jpg) +Figure 15: Diversity of traces sampled with Temperature $\in$ {0.8, 1.0, 1.5} for Gemma-2-2B SFT checkpoints on GSM8k + +![](images/445fd20e4c1373f0114643f0a40149f387735f7d61bc9ab734ce91f06f149ec6.jpg) + +![](images/c20602e164d649b2cede3ffcbe6d6d45d34e0fbfdb8701a97c0a3495fe22a13a.jpg) + +![](images/04a8123274c3fe4a7a0f718441ab029fcec04c65de463ce4d5f4f8ddb1113e96.jpg) + +# F.2 How well do token-level diverse decoding strategies compare with optimal strategy with oracle? + +Hyperparameter Tuning Details We grid search for optimal temperature for all baselines over $T = [0.8, 1.0, 1.2, 1.5, 1.8]$ . For nucleus, we choose the best cutoff threshold between $[0.8, 0.9, 0.95]$ . For min-p, we choose the best probability threshold between $[0.01, 0.05, 0.1]$ . For tokenwise top-k, we choose best k between $[12, 25, 50]$ . + +
Decoding StrategyPass@2Pass@4Pass@8
Naive0.5650.6660.760
Nucleus0.5660.6680.757
Min-p0.5660.6680.760
Top-k0.5630.6660.756
Top-k w/Oracle0.7600.8320.901
+ +Table 2: Best Pass@k of Sampling Strategies for Qwen-2.5-0.5B over SFT checkpoints + +
Decoding StrategyPass@2Pass@4Pass@8
Naive0.5470.6480.737
Nucleus0.5280.6170.694
Min-p0.5500.6550.744
Top-k0.5380.6460.738
Top-k w/Oracle0.7300.8140.878
+ +Table 3: Pass@k of Sampling Strategies for Qwen-2.5-0.5B at Last SFT Checkpoint + +![](images/c6ea5a13b67d1c489b5482fd1dc7e9c590db2bda35acfbb90c820e3cee9fbbba.jpg) +Figure 16: Pass@K over different Min-P thresholds $\gamma \in [0,0.3]$ and temperatures $T\in [1,1.6]$ for Gemma2-2B finetuned on GSM8K. Generally, no min-p threshold paired with high temperature $\mathrm{T} = 1.6$ (in light green) is able to surpass the Pass@1 of $\mathrm{T} = 1$ with best min-p threshold (in orange). In other words, unlike WiSE-FT which increases both Pass@1 and Pass@K, Pass@1 tends to still decrease for the diverse decoding strategy of applying min-p with high temperature. + +![](images/a63859f2d49730f3fc594d38e64daee690ae73ff3dfdfd4802371d22024209fe.jpg) + +![](images/e2ab7f724e76325f770f6dd199c9afc3e5801a548582d0e9ac5362497292c00e.jpg) + +![](images/528a40964d3bfda6c2aef41d322a1d87b86f248ba95412cafdaaf3724d5c8979.jpg) + +![](images/510bcdc6999edfdaf5ad51d42c4d837bb3ee069afcc24f8a26bd6ee5bc71c4e7.jpg) + +![](images/39f305f282f3aa2b8d837c2869699fbd2bb84a64fa117b53ed57a515d2e954a8.jpg) +Figure 17: Pass@k of Gemma-2-2B GSM8k Naive Sampling with Replacement + +![](images/393c4e789004c9b1d25cabc33e6caa251b9a30ac8408881cef3e9408d407b11f.jpg) +Figure 18: Pass@k of Gemma-2-2B GSM8k Oracle Top K Sampling + +![](images/746a4c2b336ab6d80695d60f5f6112ea89a75779153cb6d50e8f7c5219e462ab.jpg) + +![](images/1eb6b1fd37997ea936e23fe598e2d89e0cb3ea24361199d8c518915d5a76ffb0.jpg) + +![](images/d3bd9e86127b96e78ed2a923eb025ecabea92613c76f789c22579bde4d166df7.jpg) + +![](images/a3080dc079b0901c51c02399a1907b2e2c96b0a922a54f5a446ed2ca4860e645.jpg) + +![](images/07caf13e3b113102c5e105ffade1b5592d6b69e20a12f6d7be497607fbc435bf.jpg) + +![](images/fc1e852587bf0d908433faf87683b609d90596c02a702dd55504a960a15c609c.jpg) + +![](images/9e6d23e6964203140d9a54ab2c01c778ff9fc0a06702b556276de27f651dd276.jpg) + +![](images/010ca52fb4d6d2f4a74f158e3d4b742d0d60ff44e3344acc665ea390dcd0d87d.jpg) + 
+![](images/34da879b88c056d54890866c39e610bb90307c00176ff655d5e91b80505a2801.jpg) +Figure 19: Pass@k of Qwen-2.5-0.5B GSM8k Naive Sampling with Replacement + +# F.3 Diversity Comparison Between SFT and WiSE-FT + +![](images/056234a694a700f1c7f01a6e9ed20094d7cd0fdb69eb5de2b5a653d849348ef9.jpg) + +![](images/8a3d27f0b455df3c98d77c12e7837d76fbf9b29395a7db957519d3cc75142e50.jpg) + +![](images/db799a546fac2266a4fb6e884a368cf79f696cc523c048ac9f7712be215438ae.jpg) + +![](images/f3af3a04795cd7ec6e134e72ceb0fd20ab57c4f2f3d29acda347037690cecaa8.jpg) + +![](images/335ad53fb8c267ab9f4d31675ae0ac9c056c01642d53698f0a201571736eb81f.jpg) +Figure 20: Pass@k of Qwen-2.5-0.5B GSM8k Oracle Top K Sampling + +![](images/ccfab5cfd7bae8ef782e079cda3aeda4adbe6f786122fc77799b75ec40133ee2.jpg) + +![](images/5c78239b846481b7bf9c97f9b381676efebb712f06044215ffc0828a1d3181dd.jpg) +Figure 21: Operation, Semantic, and Answer Diversity of Gemma-2-2B checkpoints of SFT over GSM8K versus the corresponding WiSE-FT variants (with the earliest checkpoint). We decode with temperature set to 1.0. + +![](images/4546a62db2a2c93d5719df6b62375b8b3d804a1758924ed0422076a662d1a358.jpg) +Figure 22: Operation, Semantic, and Answer Diversity of Gemma-2-2B checkpoints of SFT over GSM8K versus the corresponding WiSE-FT variants (with the earliest checkpoint). We decode with temperature set to 1.6. 
+ +# G Best of K Evaluation + +![](images/9e3fdc034bc19b8587b4804417cfbed97b363f7d3658230fe4584772068195a9.jpg) + +![](images/b44675a6666bda0b73ef9e200a9b1d1022ee221c3d4530367974a9e78cee2014.jpg) + +![](images/aeb10270cd0a472e542fdfa57315bbbb7b5ba41555291cba501c05e400478d37.jpg) + +![](images/b2d2e4fd5b348100cefd256746110d2df91d3244a48b747cbf5561ad703f3c6e.jpg) + +![](images/ec5aabdb8afc02d520db433b2ed880cfb460293ab4f514cf1cd416a0e05b8f01.jpg) +Figure 23: Best@K performance on MATH500 with ORM verifier, comparing different SFT and WiSE-FT checkpoints of Qwen-2.5-0.5B for $K = 2,4,8,32$ + +![](images/814e37e4a8cfad542f088af46bbd948f458f6686fdea387a041191635dae743d.jpg) + +![](images/dcc8297332fec4d4f444504910cf3c945a0afaf6ec9a11be41dfb59a185c4df3.jpg) + +![](images/04d3dcc21b958936bdc9a1f91c07188c307c9efefc4ccbe3518792514baf7514.jpg) + +![](images/568239dfe0d053038302fc7fbde2c6756bc3312c7e80cec28090883679638505.jpg) + +![](images/860ad658417f481695762b3985aa86cc6c8648a27a1974383b58f81738a321ca.jpg) + +![](images/b0c17f52c3d8d9f710084b2e71cd83327cb690eb5a0d9b91600ddb7e86032d53.jpg) + +![](images/81519a54b17fc6c666684e274696d0ac2127e9fe4ff88d0d9326a87c6149a208.jpg) + +![](images/205af94455736f91afbef56faed86d6536d4884d2490f957f228fe6f9d772c60.jpg) +Figure 24: Best@K performance on MATH500 with ORM (Top) and Majority Vote (Bottom) for early, middle, and late SFT checkpoints and WiSE-FT counterparts, showing Qwen-2.5-0.5B's scaling across K values. + +# H Diversity Collapse and WiSE-FT Results for the Coding Task + +To test whether coding tasks exhibit the same diversity collapse observed in reasoning benchmarks, we fine-tuned the Qwen2.5-coder-0.5B model for 10 epochs on the Magicoder-Evol-Instruct-110K dataset, following the Stage 2 SFT recipe from OpenCoder LLM. We then applied WiSE-FT by interpolating the weights of the second SFT checkpoint with the initial model using interpolation ratio 0.5. 
Both the original SFT checkpoints and their WiSE-FT counterparts were evaluated on HumanEval for pass@k. + +![](images/60a8b9a9af69b7127d68729341fe2361371efa2b3e4bacb8eec3f42fc9ec84a0.jpg) + +![](images/9dc1172db134cef8ae1e854cc6f68ebfe8e6c96aaeacb27c5b0c870ad9752a67.jpg) + +![](images/cafeaf2d7bb4404026a1ba3699040624b79be50df113c5458aaa97d41afc6c76.jpg) + +![](images/1e7d7a28f9f5517750d4bcda26579c66a321c8f7a9d9072a3c097f819faa0084.jpg) + +![](images/d837c799a3424c44d929865ee8c5c17f2da827b7e87b44e7380e31803ce3e9b3.jpg) +Figure 25: Best@K performance on MATH500 with majority voting, comparing different SFT and WiSE-FT checkpoints of Qwen-2.5-0.5B for $K = 2, 4, 8, 32$ + +We found that, much like in mathematical reasoning tasks, SFT on coding data indeed suffers from diversity collapse: although pass@1 steadily improves over epochs, pass@k begins to deteriorate. And WiSE-FT still improves performance and mitigates the diversity collapse. + +![](images/cb3ca09225ec7f2708663a63adf1bb7d1807c672a937ff5625e5894c7f467191.jpg) +HumanEval - Pass@k Across SFT Checkpoints + +![](images/293bfc0fc27150b8eec52e2f179c7a8bee6d05de89eb6f954bb89407cf535e84.jpg) +Figure 26: Pass@K performance of SFT checkpoints on HumanEval (temperature = 1.0). + +![](images/dc80c75a67d472177dcc63f0074f37ec267e332b7e7317accb544868294cd22e.jpg) + +![](images/aa8a4b4c00fe197e0575e11b2fecb88604a4562cbe6fb459230d83e7172326d4.jpg) +HumanEval - Pass@k Across Checkpoints (SFT vs WiSE-FT) + +![](images/6f01ccaf5c53f26ffda8ffe2a095592c21beb991d051e6b3ecc692744b009663.jpg) +Figure 27: Comparison of pass@K for SFT checkpoints and their WiSE-FT counterparts at $k = 1$ , 16, 64. + +![](images/adce33339af950e6097d8809ac7898c79d536d03de13d3b42c8e47246adb4ef3.jpg) + +![](images/b5e95e0338cedd9b1f43533a5dbb16642aa6e174015a5f1dd91e1e8a66e77aa3.jpg) +HumanEval - Last Checkpoint (1700) Comparison: SFT vs WiSE-FT +Figure 28: Pass@K performance of the final SFT checkpoint versus its WiSE-FT variant. 
\ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10478/images/010ca52fb4d6d2f4a74f158e3d4b742d0d60ff44e3344acc665ea390dcd0d87d.jpg b/data/2025/2504_10xxx/2504.10478/images/010ca52fb4d6d2f4a74f158e3d4b742d0d60ff44e3344acc665ea390dcd0d87d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ef11c6110d0905edc1b2c979fa13e3d6d212944 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/010ca52fb4d6d2f4a74f158e3d4b742d0d60ff44e3344acc665ea390dcd0d87d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5ab4b3a7da701a35ab2875f0651a90acf1d8b8cded2004eb550b46eab0c477c +size 17896 diff --git a/data/2025/2504_10xxx/2504.10478/images/010df19603f6a8b8aed4c150ff7104474ebf1313d06df35b56b879fe8d0142f9.jpg b/data/2025/2504_10xxx/2504.10478/images/010df19603f6a8b8aed4c150ff7104474ebf1313d06df35b56b879fe8d0142f9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d435ff31715d8df1d5a99797f52d5cb73299b119 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/010df19603f6a8b8aed4c150ff7104474ebf1313d06df35b56b879fe8d0142f9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3e3ea700e0b550c15c523e78d53b529970f2f3ceb4c81467c1e8aa6aa5b2d91 +size 11539 diff --git a/data/2025/2504_10xxx/2504.10478/images/025400c251313c19885eeda9c411127b9f8264a71c6d8cea574e0029094f187b.jpg b/data/2025/2504_10xxx/2504.10478/images/025400c251313c19885eeda9c411127b9f8264a71c6d8cea574e0029094f187b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba6cfa39de309083505fcf3e2417d9c0afa29737 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/025400c251313c19885eeda9c411127b9f8264a71c6d8cea574e0029094f187b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f32714e1832878ce1b47e6cfda07eb905366672d387539bd7442010afa377fd1 +size 4225 diff --git 
a/data/2025/2504_10xxx/2504.10478/images/02544de3a14cfeaa6aea139b5b0ab1cbd6ec4f541559f7e7213fbf4c3e2553a9.jpg b/data/2025/2504_10xxx/2504.10478/images/02544de3a14cfeaa6aea139b5b0ab1cbd6ec4f541559f7e7213fbf4c3e2553a9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87425be102b26fddb6efa291305185a7daf134a9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/02544de3a14cfeaa6aea139b5b0ab1cbd6ec4f541559f7e7213fbf4c3e2553a9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70942cd36049449ae3847c7e1cddbef73bf6c28c63cc772a86142ccee0db6068 +size 4828 diff --git a/data/2025/2504_10xxx/2504.10478/images/04a8123274c3fe4a7a0f718441ab029fcec04c65de463ce4d5f4f8ddb1113e96.jpg b/data/2025/2504_10xxx/2504.10478/images/04a8123274c3fe4a7a0f718441ab029fcec04c65de463ce4d5f4f8ddb1113e96.jpg new file mode 100644 index 0000000000000000000000000000000000000000..faf3347becfa2cd6b55abba0fa63f7a934f239f2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/04a8123274c3fe4a7a0f718441ab029fcec04c65de463ce4d5f4f8ddb1113e96.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbeb4300f160b6d6aa9f087160af61f2e7e2623a3253ab1e57aeae956eb29d79 +size 14289 diff --git a/data/2025/2504_10xxx/2504.10478/images/04d3dcc21b958936bdc9a1f91c07188c307c9efefc4ccbe3518792514baf7514.jpg b/data/2025/2504_10xxx/2504.10478/images/04d3dcc21b958936bdc9a1f91c07188c307c9efefc4ccbe3518792514baf7514.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc04afb8fcc984b96d5c27802934393af64fc457 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/04d3dcc21b958936bdc9a1f91c07188c307c9efefc4ccbe3518792514baf7514.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bea75663cf6a947a81807a3dbed676c2385694d9259a26db556cdab8aeec3b57 +size 17422 diff --git a/data/2025/2504_10xxx/2504.10478/images/056234a694a700f1c7f01a6e9ed20094d7cd0fdb69eb5de2b5a653d849348ef9.jpg 
b/data/2025/2504_10xxx/2504.10478/images/056234a694a700f1c7f01a6e9ed20094d7cd0fdb69eb5de2b5a653d849348ef9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d04431137a171868249323c2ac57e210de582c65 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/056234a694a700f1c7f01a6e9ed20094d7cd0fdb69eb5de2b5a653d849348ef9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:123975f30a2e372c7a0b6fe25cd9588e0fa8030408d0f43ca0c8eb416927e951 +size 17343 diff --git a/data/2025/2504_10xxx/2504.10478/images/07caf13e3b113102c5e105ffade1b5592d6b69e20a12f6d7be497607fbc435bf.jpg b/data/2025/2504_10xxx/2504.10478/images/07caf13e3b113102c5e105ffade1b5592d6b69e20a12f6d7be497607fbc435bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f9d8816fe27e78e02d6b9f9128fa3fe68df3845c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/07caf13e3b113102c5e105ffade1b5592d6b69e20a12f6d7be497607fbc435bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c76d73d75100dffbc9b681d3125a48ff17cf3e2e9bba676497c57aa0eb2936bb +size 15341 diff --git a/data/2025/2504_10xxx/2504.10478/images/083eced91fb46849603d1ad72fa239d4acbfb899ad82936da65f408e5a04fcc8.jpg b/data/2025/2504_10xxx/2504.10478/images/083eced91fb46849603d1ad72fa239d4acbfb899ad82936da65f408e5a04fcc8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16833271bd182e8da57a0a373fc7b1b82547ba79 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/083eced91fb46849603d1ad72fa239d4acbfb899ad82936da65f408e5a04fcc8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2186efb5eaf24cec02084fb5a10e22cf80024340e0a67ff5596031205639fba0 +size 7156 diff --git a/data/2025/2504_10xxx/2504.10478/images/08617bf65e4ebad736f26039f5ce3bc40693f92d1f1c5988d045480873e04806.jpg b/data/2025/2504_10xxx/2504.10478/images/08617bf65e4ebad736f26039f5ce3bc40693f92d1f1c5988d045480873e04806.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..a89a49f3c24f755997e4719ced8fb490a0358baf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/08617bf65e4ebad736f26039f5ce3bc40693f92d1f1c5988d045480873e04806.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c6df7f2e2f4db91dd4776b4e4473f6a38c8f410e1add8448741ebd0ca536d04 +size 4445 diff --git a/data/2025/2504_10xxx/2504.10478/images/09a06b40fa4ea2f11c53a1fb0b61297195528e6be636a1b05e4aa3162e56bcea.jpg b/data/2025/2504_10xxx/2504.10478/images/09a06b40fa4ea2f11c53a1fb0b61297195528e6be636a1b05e4aa3162e56bcea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..960cf4698b007a17e047cdd8b3a1975f746ba0bb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/09a06b40fa4ea2f11c53a1fb0b61297195528e6be636a1b05e4aa3162e56bcea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:467b47c0711fba98c3224a9c3758a88392a0eb3f6f2c4da852434345c9b07675 +size 9695 diff --git a/data/2025/2504_10xxx/2504.10478/images/0c4250ef61690cb6f1d8c4915cae968117a343314b9954e0c0942cc5689c68ec.jpg b/data/2025/2504_10xxx/2504.10478/images/0c4250ef61690cb6f1d8c4915cae968117a343314b9954e0c0942cc5689c68ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..469a94aa2231a0bee28eb04823826b8b91d697b6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/0c4250ef61690cb6f1d8c4915cae968117a343314b9954e0c0942cc5689c68ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1493fd060c0c9bb5673f6eaa1c29a912cb87626151c7eee8399497f01662b041 +size 5815 diff --git a/data/2025/2504_10xxx/2504.10478/images/0c94c758263527048a63f3968a11597da849450b1792cda84ac408bad5f98b58.jpg b/data/2025/2504_10xxx/2504.10478/images/0c94c758263527048a63f3968a11597da849450b1792cda84ac408bad5f98b58.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6827205de5affcf64e8881aef68b48bf62d0b5c2 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10478/images/0c94c758263527048a63f3968a11597da849450b1792cda84ac408bad5f98b58.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58ce24ec342bb731d2c72e60abd33acc569d728b2f73aa32a4c1e5c478793268 +size 17135 diff --git a/data/2025/2504_10xxx/2504.10478/images/0ca043e2ceef566089e4e9de31db5711daf3366c9e45fb0dd7cea3659355b3d9.jpg b/data/2025/2504_10xxx/2504.10478/images/0ca043e2ceef566089e4e9de31db5711daf3366c9e45fb0dd7cea3659355b3d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..33822c669c7baa92ed05c6f4afa247f11f9b0b4d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/0ca043e2ceef566089e4e9de31db5711daf3366c9e45fb0dd7cea3659355b3d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c26d303fe7aa13ca649c8038832a2cedff516d52f9264f04f186f20748b8e41 +size 13659 diff --git a/data/2025/2504_10xxx/2504.10478/images/0d716e431c635ba9b4ddf9553c1ba062384da0054901a2617578c183216748d1.jpg b/data/2025/2504_10xxx/2504.10478/images/0d716e431c635ba9b4ddf9553c1ba062384da0054901a2617578c183216748d1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2468dacf3533ebc15e143b8cef9653ec80211eff --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/0d716e431c635ba9b4ddf9553c1ba062384da0054901a2617578c183216748d1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cecbfb67267a95ee9a297d5dea5bfea297eadbf52e6a4be767cf1bed7a236c3d +size 2924 diff --git a/data/2025/2504_10xxx/2504.10478/images/0f296249734c6e78fe73f1f7ad3efd2ffa14c033f2625ceb5f3ec2ec4cadfdf1.jpg b/data/2025/2504_10xxx/2504.10478/images/0f296249734c6e78fe73f1f7ad3efd2ffa14c033f2625ceb5f3ec2ec4cadfdf1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..37c9fd6dcfd13d2a4c33bfb4899a0f799fdc2dd0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/0f296249734c6e78fe73f1f7ad3efd2ffa14c033f2625ceb5f3ec2ec4cadfdf1.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:6f9870773c41e2818f8d663a85b54b66e0f69e1962f759a6bd8452bba694a0f7 +size 6634 diff --git a/data/2025/2504_10xxx/2504.10478/images/105ec9b2f770f3fd0623182efd6b919cba9dbb10c81c24b7a52fedf2e526985a.jpg b/data/2025/2504_10xxx/2504.10478/images/105ec9b2f770f3fd0623182efd6b919cba9dbb10c81c24b7a52fedf2e526985a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea430e729b121e63be0f6b34688fabc3544043fe --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/105ec9b2f770f3fd0623182efd6b919cba9dbb10c81c24b7a52fedf2e526985a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9d143aedc76e0e87ef48d81b90f73425ccf2e99a3a45e8d233f6567f451cf26 +size 45669 diff --git a/data/2025/2504_10xxx/2504.10478/images/106be94ca90f21b22c1046ac94af48a64aa8412f6ffde8449c502aa85c296ee4.jpg b/data/2025/2504_10xxx/2504.10478/images/106be94ca90f21b22c1046ac94af48a64aa8412f6ffde8449c502aa85c296ee4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1314ef7434573de318e5c740cedaa28b3a304a6b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/106be94ca90f21b22c1046ac94af48a64aa8412f6ffde8449c502aa85c296ee4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50245a2b35325b70396a64b103b4ed5287454e5d5fc72d75b822ea2daa9ade0c +size 3507 diff --git a/data/2025/2504_10xxx/2504.10478/images/110fab68254d7edd76626c12dee15bae4c5510f3d1620d88c62cdb6cd3e849b2.jpg b/data/2025/2504_10xxx/2504.10478/images/110fab68254d7edd76626c12dee15bae4c5510f3d1620d88c62cdb6cd3e849b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5cfc900fe4b123876de32972690ed16a8d8249f4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/110fab68254d7edd76626c12dee15bae4c5510f3d1620d88c62cdb6cd3e849b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c54ed73dd762b7e5fd39ff93edfa58d88e47862e4094d5b439b55554f4cea5e +size 10854 diff --git 
a/data/2025/2504_10xxx/2504.10478/images/136c97bd309561e7ca54a0ca2069ad3f0b521147a93b614b1fb712de45f0c740.jpg b/data/2025/2504_10xxx/2504.10478/images/136c97bd309561e7ca54a0ca2069ad3f0b521147a93b614b1fb712de45f0c740.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c9c76d0f33a5681a1ac56254ccebda60762d319 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/136c97bd309561e7ca54a0ca2069ad3f0b521147a93b614b1fb712de45f0c740.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5b9c3fd2ed28d1d63a0a1ec8a32b2f4d7b91669ab5f18af2b839028bbbb0488 +size 8659 diff --git a/data/2025/2504_10xxx/2504.10478/images/19a01a99597df80ac8df614f4c1787a0a5b99ab4663cc34196f872471af91463.jpg b/data/2025/2504_10xxx/2504.10478/images/19a01a99597df80ac8df614f4c1787a0a5b99ab4663cc34196f872471af91463.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5142909fb24f2cc02ddba5384c3286cd575dae4e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/19a01a99597df80ac8df614f4c1787a0a5b99ab4663cc34196f872471af91463.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bec16174219a3aba58e9ec6484c63f279c29e93d6142db21d48673d3e0b2e788 +size 23825 diff --git a/data/2025/2504_10xxx/2504.10478/images/1c07139c31aa052e9802591db451bb95cd466333559e1496c77fb8de1be1c789.jpg b/data/2025/2504_10xxx/2504.10478/images/1c07139c31aa052e9802591db451bb95cd466333559e1496c77fb8de1be1c789.jpg new file mode 100644 index 0000000000000000000000000000000000000000..992bf918468c0ce8514c666718dc27a3d17ac317 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/1c07139c31aa052e9802591db451bb95cd466333559e1496c77fb8de1be1c789.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b37e56bcd3cca667a971890c16d04ba8099e95d6096a8a3e568ca99aa6f7b9df +size 2898 diff --git a/data/2025/2504_10xxx/2504.10478/images/1e7d7a28f9f5517750d4bcda26579c66a321c8f7a9d9072a3c097f819faa0084.jpg 
b/data/2025/2504_10xxx/2504.10478/images/1e7d7a28f9f5517750d4bcda26579c66a321c8f7a9d9072a3c097f819faa0084.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4cf60cf258d646b5a51a8c6da251ecd588b38fd0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/1e7d7a28f9f5517750d4bcda26579c66a321c8f7a9d9072a3c097f819faa0084.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7290eccfac0d26a4818a2f08900807d64251f5be731a8a6b35a5a559f15d719c +size 10257 diff --git a/data/2025/2504_10xxx/2504.10478/images/1eb6b1fd37997ea936e23fe598e2d89e0cb3ea24361199d8c518915d5a76ffb0.jpg b/data/2025/2504_10xxx/2504.10478/images/1eb6b1fd37997ea936e23fe598e2d89e0cb3ea24361199d8c518915d5a76ffb0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94b9dd4f68ad8d185fac125f5f3254a35428dd02 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/1eb6b1fd37997ea936e23fe598e2d89e0cb3ea24361199d8c518915d5a76ffb0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3007c16f9f9ce4985b1d130880751a0581c0c20cb38feef8fc08260f6eb75617 +size 16977 diff --git a/data/2025/2504_10xxx/2504.10478/images/205af94455736f91afbef56faed86d6536d4884d2490f957f228fe6f9d772c60.jpg b/data/2025/2504_10xxx/2504.10478/images/205af94455736f91afbef56faed86d6536d4884d2490f957f228fe6f9d772c60.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5153cf11ecc8b88b648ef1c44f49d4f60b8c0be0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/205af94455736f91afbef56faed86d6536d4884d2490f957f228fe6f9d772c60.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f59259c6cfbc978127acae75413984dcd45805f21874e236c215a2a98ec9b9a +size 7404 diff --git a/data/2025/2504_10xxx/2504.10478/images/210f985ab8344ab9024b0e8866757143d08c6a402d3b38801bfe06bad03d0471.jpg b/data/2025/2504_10xxx/2504.10478/images/210f985ab8344ab9024b0e8866757143d08c6a402d3b38801bfe06bad03d0471.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..5a9b776af5769178efb9f248ca45aec9ae5f5d99 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/210f985ab8344ab9024b0e8866757143d08c6a402d3b38801bfe06bad03d0471.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1142370df49560afeac8574f2a22322501fecc1edb3b10a28b08d0c0f7449e97 +size 13465 diff --git a/data/2025/2504_10xxx/2504.10478/images/2129eafde804b7c6253031f96635d28714155c8f61913d221bec62adc0d05819.jpg b/data/2025/2504_10xxx/2504.10478/images/2129eafde804b7c6253031f96635d28714155c8f61913d221bec62adc0d05819.jpg new file mode 100644 index 0000000000000000000000000000000000000000..054f11e086e288daddcc05896e783342c64126b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/2129eafde804b7c6253031f96635d28714155c8f61913d221bec62adc0d05819.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb2f9d03649bc11fe37d8d0ac36962a628cc1bd403b94f15cc791dd8244ab911 +size 7900 diff --git a/data/2025/2504_10xxx/2504.10478/images/219b5d70f1361d0524e9e43e4ca877a094875ed4025d0d18eae4a0143396e6c9.jpg b/data/2025/2504_10xxx/2504.10478/images/219b5d70f1361d0524e9e43e4ca877a094875ed4025d0d18eae4a0143396e6c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b0c6c0b75465a45341e736004cc1d59dc7125e8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/219b5d70f1361d0524e9e43e4ca877a094875ed4025d0d18eae4a0143396e6c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b23b45a48b8bccfb59fd579f96fd01b57a05b24e052f44eb6e923161a77d597f +size 6509 diff --git a/data/2025/2504_10xxx/2504.10478/images/235ace59d9f3a9fd4138d33eda84ec30aa842d50403a6325e09b14c22038f792.jpg b/data/2025/2504_10xxx/2504.10478/images/235ace59d9f3a9fd4138d33eda84ec30aa842d50403a6325e09b14c22038f792.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b0069d00a5188dec8d022f277b853a6ba968fe10 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10478/images/235ace59d9f3a9fd4138d33eda84ec30aa842d50403a6325e09b14c22038f792.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:030791b5f5a5645d6619e4f2e87f3fa5e91dcbf250ca93fbcbe85e848e4a9887 +size 9590 diff --git a/data/2025/2504_10xxx/2504.10478/images/25ebeb290d7259127374c9c05e2e0ce5075d7b65dfef8be8699ed49268e3b01a.jpg b/data/2025/2504_10xxx/2504.10478/images/25ebeb290d7259127374c9c05e2e0ce5075d7b65dfef8be8699ed49268e3b01a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cee9c37cd2f3c45e343b2013f44c17d1cabb211a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/25ebeb290d7259127374c9c05e2e0ce5075d7b65dfef8be8699ed49268e3b01a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37f7d8d89c0e8f3a7a20752934bd17fe4b2faafcf7bd9614b4f2b9014c09a8f6 +size 10812 diff --git a/data/2025/2504_10xxx/2504.10478/images/268a4214b18b790ae18f4c66932eb03706a3662c1ff2bbd2235424d0bcd92783.jpg b/data/2025/2504_10xxx/2504.10478/images/268a4214b18b790ae18f4c66932eb03706a3662c1ff2bbd2235424d0bcd92783.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30f07492adc2b9c5cfc2fe3d3a149a62781a3459 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/268a4214b18b790ae18f4c66932eb03706a3662c1ff2bbd2235424d0bcd92783.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6004793f1209f4e7972c6d1360aebad19f9d0dfb86194ae16b59918b31ccacd +size 15591 diff --git a/data/2025/2504_10xxx/2504.10478/images/281b7e3b04727e31eb48c9f9eb0dac923c0ed6c74f21659a0a4d939eab7dbcdc.jpg b/data/2025/2504_10xxx/2504.10478/images/281b7e3b04727e31eb48c9f9eb0dac923c0ed6c74f21659a0a4d939eab7dbcdc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f1f63e57dffd25b92643919da408389aa05084d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/281b7e3b04727e31eb48c9f9eb0dac923c0ed6c74f21659a0a4d939eab7dbcdc.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:0df986bce2bc7c9dea0b241422bb903d12049eb4efb9baa1384bd4d1c208b95f +size 20051 diff --git a/data/2025/2504_10xxx/2504.10478/images/293bfc0fc27150b8eec52e2f179c7a8bee6d05de89eb6f954bb89407cf535e84.jpg b/data/2025/2504_10xxx/2504.10478/images/293bfc0fc27150b8eec52e2f179c7a8bee6d05de89eb6f954bb89407cf535e84.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7791841f9c0c7e1cf411bce95e2ddb1826e02498 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/293bfc0fc27150b8eec52e2f179c7a8bee6d05de89eb6f954bb89407cf535e84.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a04757101998b1b2132ad60f156a177cd08f34ba3212e365e931dda12bfa1e03 +size 11828 diff --git a/data/2025/2504_10xxx/2504.10478/images/2afd9d9121757c862096b1a3d7ace4ff98bfb6719ea911ea906426aa924ec8b7.jpg b/data/2025/2504_10xxx/2504.10478/images/2afd9d9121757c862096b1a3d7ace4ff98bfb6719ea911ea906426aa924ec8b7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..88aaeef2694d8c86b3120172d7aa080b0b8796b2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/2afd9d9121757c862096b1a3d7ace4ff98bfb6719ea911ea906426aa924ec8b7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a760a472037bbfe174c33acb195f5f70a926358f2653daadb5c939005b3ca11e +size 5652 diff --git a/data/2025/2504_10xxx/2504.10478/images/2b5bc1934d7c465d670fdb3f31d7c83014925d80d56f23229733f5fb3a5e4176.jpg b/data/2025/2504_10xxx/2504.10478/images/2b5bc1934d7c465d670fdb3f31d7c83014925d80d56f23229733f5fb3a5e4176.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7df8e1a42efc01f915de1ad7d33647c0a94cf0bc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/2b5bc1934d7c465d670fdb3f31d7c83014925d80d56f23229733f5fb3a5e4176.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21a104152ce30b4df690ac2a885320bd708430322dec1ec3f7eadba22d77b4ef +size 14682 diff --git 
a/data/2025/2504_10xxx/2504.10478/images/2ba83d43ee06d46a52d1c0a7af9c34e2f33d4173d7a4793a62c64f013efa1d66.jpg b/data/2025/2504_10xxx/2504.10478/images/2ba83d43ee06d46a52d1c0a7af9c34e2f33d4173d7a4793a62c64f013efa1d66.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2af1f5a9b6552a134a78b34d8a7270fba8e35825 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/2ba83d43ee06d46a52d1c0a7af9c34e2f33d4173d7a4793a62c64f013efa1d66.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d0f9531cb8b94d8c5f9c9149279cba99d7a790036cc3a0d64803923ac7945a4 +size 11187 diff --git a/data/2025/2504_10xxx/2504.10478/images/2f678190c34bc263f209d564e733b949a49f0c5b80c07037e411d4cdccc2776b.jpg b/data/2025/2504_10xxx/2504.10478/images/2f678190c34bc263f209d564e733b949a49f0c5b80c07037e411d4cdccc2776b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f92b3c7a1fb05e1b2113536852faa3a395534ba9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/2f678190c34bc263f209d564e733b949a49f0c5b80c07037e411d4cdccc2776b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:778d113d121073a9934929ea1b8d8e236bd86bc5ccdfc1e77f5891ba84985084 +size 11138 diff --git a/data/2025/2504_10xxx/2504.10478/images/335ad53fb8c267ab9f4d31675ae0ac9c056c01642d53698f0a201571736eb81f.jpg b/data/2025/2504_10xxx/2504.10478/images/335ad53fb8c267ab9f4d31675ae0ac9c056c01642d53698f0a201571736eb81f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2ce0d9e8ef3cbca36a9204434ba65d48947e9881 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/335ad53fb8c267ab9f4d31675ae0ac9c056c01642d53698f0a201571736eb81f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b1f27efbf7b470aa82eb2762d299f82ce30d66a9d76560025bc5b4528ec5e2a +size 5337 diff --git a/data/2025/2504_10xxx/2504.10478/images/33b82e197b5263aaec61d52cf001bea59054b3e26d2244a7f955f99bbd538652.jpg 
b/data/2025/2504_10xxx/2504.10478/images/33b82e197b5263aaec61d52cf001bea59054b3e26d2244a7f955f99bbd538652.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec5f22f88d65425170c5d4dd0043ad9db7c0c6b7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/33b82e197b5263aaec61d52cf001bea59054b3e26d2244a7f955f99bbd538652.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f33684418dd300d6c319cea3db6b937b2301159789e178e32d901e22a64162bf +size 62004 diff --git a/data/2025/2504_10xxx/2504.10478/images/34da879b88c056d54890866c39e610bb90307c00176ff655d5e91b80505a2801.jpg b/data/2025/2504_10xxx/2504.10478/images/34da879b88c056d54890866c39e610bb90307c00176ff655d5e91b80505a2801.jpg new file mode 100644 index 0000000000000000000000000000000000000000..919cad98a76ee40311e14d505643352c362df96a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/34da879b88c056d54890866c39e610bb90307c00176ff655d5e91b80505a2801.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f32f7663c3c53afe1c050220e72b151530b57794dbfcaf4713ef3eb9d913632 +size 4785 diff --git a/data/2025/2504_10xxx/2504.10478/images/34dd8473c7bea803c37a13a33b349b2cd610686e846013957bdec7e504f82175.jpg b/data/2025/2504_10xxx/2504.10478/images/34dd8473c7bea803c37a13a33b349b2cd610686e846013957bdec7e504f82175.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3bdf618b42e5583041d013f2de6f0fa1c287fa3c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/34dd8473c7bea803c37a13a33b349b2cd610686e846013957bdec7e504f82175.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:889668c83537d7139d7851978f115f2286f602909b45b6f508faa812c41da722 +size 19997 diff --git a/data/2025/2504_10xxx/2504.10478/images/3536f4b1df50cd66ddd3bfbccb80c35fc10834c00925c31728553b31b2fbfd2a.jpg b/data/2025/2504_10xxx/2504.10478/images/3536f4b1df50cd66ddd3bfbccb80c35fc10834c00925c31728553b31b2fbfd2a.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..2acfa5ce8c4f7ea33fe587b8c5b27b993b6f04ae --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/3536f4b1df50cd66ddd3bfbccb80c35fc10834c00925c31728553b31b2fbfd2a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:278647c0de47cb353b1a1414ebc443a4d903de8c2b5cd63d1fb7b012442ab745 +size 21253 diff --git a/data/2025/2504_10xxx/2504.10478/images/38159ef7ae6f51b78b1f51b27ac07d7019b9006e97ddbd5054b02d32b076acac.jpg b/data/2025/2504_10xxx/2504.10478/images/38159ef7ae6f51b78b1f51b27ac07d7019b9006e97ddbd5054b02d32b076acac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0027271b8fe9e04a35307888cd006640abb79877 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/38159ef7ae6f51b78b1f51b27ac07d7019b9006e97ddbd5054b02d32b076acac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd1b0dd7d6ef4d3e8eb9c8093bbfc2318926f843db5c3524e6910b24cbed54f6 +size 35228 diff --git a/data/2025/2504_10xxx/2504.10478/images/393c4e789004c9b1d25cabc33e6caa251b9a30ac8408881cef3e9408d407b11f.jpg b/data/2025/2504_10xxx/2504.10478/images/393c4e789004c9b1d25cabc33e6caa251b9a30ac8408881cef3e9408d407b11f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8be768ec9b169bef3d5b2033386240ef52b13d6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/393c4e789004c9b1d25cabc33e6caa251b9a30ac8408881cef3e9408d407b11f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc57b86b4221f242f04af291ad7c48ffb9b8ed2753f768e8ca61544eeeb67d3e +size 17571 diff --git a/data/2025/2504_10xxx/2504.10478/images/39f305f282f3aa2b8d837c2869699fbd2bb84a64fa117b53ed57a515d2e954a8.jpg b/data/2025/2504_10xxx/2504.10478/images/39f305f282f3aa2b8d837c2869699fbd2bb84a64fa117b53ed57a515d2e954a8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1499f29b987b4f991dc3f23bf2b8334fdf033212 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10478/images/39f305f282f3aa2b8d837c2869699fbd2bb84a64fa117b53ed57a515d2e954a8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:717a6077123a3cbeaf2852a4568b749877268debfdead4bd4767f9cede25bc09 +size 5554 diff --git a/data/2025/2504_10xxx/2504.10478/images/3a2e6bad1cb29144c1f3bf2705e2569db4a1b64e5d3e95fc84c11521e6263501.jpg b/data/2025/2504_10xxx/2504.10478/images/3a2e6bad1cb29144c1f3bf2705e2569db4a1b64e5d3e95fc84c11521e6263501.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f1672536c6d9035f79f4e1b2cf2449e3ed191328 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/3a2e6bad1cb29144c1f3bf2705e2569db4a1b64e5d3e95fc84c11521e6263501.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:800d02f7ed399c0b7b1a1b7e928bbc9d131de28ff0a212f56747fbfb6402c8df +size 4134 diff --git a/data/2025/2504_10xxx/2504.10478/images/3a918ee88871d88fcb837e4186f8c5168750ffbf4b0c2ac0d7564ec6d2f81fd3.jpg b/data/2025/2504_10xxx/2504.10478/images/3a918ee88871d88fcb837e4186f8c5168750ffbf4b0c2ac0d7564ec6d2f81fd3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e5832fd740d4163c3331bc0e7cf37e7cc34ecf2d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/3a918ee88871d88fcb837e4186f8c5168750ffbf4b0c2ac0d7564ec6d2f81fd3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e44fcdeb7c21e3811f58fa6125b30af27ce606d2c26e011c58d406b33581ee2 +size 9097 diff --git a/data/2025/2504_10xxx/2504.10478/images/3b4b375b012d324a664828356e9ada89ab2d796bd0c978efdd3c6ccc0233b487.jpg b/data/2025/2504_10xxx/2504.10478/images/3b4b375b012d324a664828356e9ada89ab2d796bd0c978efdd3c6ccc0233b487.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab38150ee1936b32cdd574fd1d8678bc3cdca113 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/3b4b375b012d324a664828356e9ada89ab2d796bd0c978efdd3c6ccc0233b487.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:0efb37e419124641290010d255a4f297c0d498f3d02e572a793821f5b4c18110 +size 9907 diff --git a/data/2025/2504_10xxx/2504.10478/images/3d13f320227abed29d629f8ace04823e9059603f82a24fb7c3c0d25ac51e5eb4.jpg b/data/2025/2504_10xxx/2504.10478/images/3d13f320227abed29d629f8ace04823e9059603f82a24fb7c3c0d25ac51e5eb4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46cbc4e9f76973b007052d181650b253aaccbd45 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/3d13f320227abed29d629f8ace04823e9059603f82a24fb7c3c0d25ac51e5eb4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d475812468b396f27b08a3925307efa758fd0e5dd13bdd2a5f59f50380416d94 +size 11183 diff --git a/data/2025/2504_10xxx/2504.10478/images/3eb53d8c924bb92658ec5635c7b2ae63fb3273756bce235deb3ec5596f40bd57.jpg b/data/2025/2504_10xxx/2504.10478/images/3eb53d8c924bb92658ec5635c7b2ae63fb3273756bce235deb3ec5596f40bd57.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8402d391d6b6934748dc4ee839c29ecbf271d2ec --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/3eb53d8c924bb92658ec5635c7b2ae63fb3273756bce235deb3ec5596f40bd57.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33fb3e3593690b9a1379c946233757dcbb96bddde950152458080462846c238c +size 10702 diff --git a/data/2025/2504_10xxx/2504.10478/images/3f01e7c236fd71d192033777739815a43af7536fe7a6e1121aeed7d48f33e627.jpg b/data/2025/2504_10xxx/2504.10478/images/3f01e7c236fd71d192033777739815a43af7536fe7a6e1121aeed7d48f33e627.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f5827f6e95ab4d7dee7523dd27e675eab7327b4f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/3f01e7c236fd71d192033777739815a43af7536fe7a6e1121aeed7d48f33e627.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df1d479c8d413c84805b25bfc406c4ccc00b596f91db0dc6f2a6a750dd134647 +size 9679 diff --git 
a/data/2025/2504_10xxx/2504.10478/images/401631f09a461d1e42f689302a470ca43729d0d0654d63edcc98eccaf75cd6ba.jpg b/data/2025/2504_10xxx/2504.10478/images/401631f09a461d1e42f689302a470ca43729d0d0654d63edcc98eccaf75cd6ba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8cc85973fd5b6c01672bda25f3a446da99b524bf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/401631f09a461d1e42f689302a470ca43729d0d0654d63edcc98eccaf75cd6ba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43037940a11755be0bbcff674646efb7d5b25d1bec4951edd9da5b2f080e37c3 +size 60959 diff --git a/data/2025/2504_10xxx/2504.10478/images/408e81cdfc67395009c30c39aad9c4a31fa71dc028099dd11d2d322e044cc302.jpg b/data/2025/2504_10xxx/2504.10478/images/408e81cdfc67395009c30c39aad9c4a31fa71dc028099dd11d2d322e044cc302.jpg new file mode 100644 index 0000000000000000000000000000000000000000..797b159efc1f0cceef7a639a64693d3a91329bb8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/408e81cdfc67395009c30c39aad9c4a31fa71dc028099dd11d2d322e044cc302.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b655d4f1ae32ccfd2111531bba107ceab5dd5818613444068f8a167de2d3e70e +size 10505 diff --git a/data/2025/2504_10xxx/2504.10478/images/445fd20e4c1373f0114643f0a40149f387735f7d61bc9ab734ce91f06f149ec6.jpg b/data/2025/2504_10xxx/2504.10478/images/445fd20e4c1373f0114643f0a40149f387735f7d61bc9ab734ce91f06f149ec6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ad6672e287bac1add7ed360c4e81bf529dbd982b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/445fd20e4c1373f0114643f0a40149f387735f7d61bc9ab734ce91f06f149ec6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a01dfd3c7e244bffd04cf4ac9db4de84f1caadde53eba5ef232dda1da079f37 +size 14332 diff --git a/data/2025/2504_10xxx/2504.10478/images/4546a62db2a2c93d5719df6b62375b8b3d804a1758924ed0422076a662d1a358.jpg 
b/data/2025/2504_10xxx/2504.10478/images/4546a62db2a2c93d5719df6b62375b8b3d804a1758924ed0422076a662d1a358.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2bf056dd96bacae1f241b307ada017a917e274b0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/4546a62db2a2c93d5719df6b62375b8b3d804a1758924ed0422076a662d1a358.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6cb320f3e3750925718217dcc0a63652f5d3052eb53bb26c24734ffc8049ea0 +size 96091 diff --git a/data/2025/2504_10xxx/2504.10478/images/4784db23bc7951ee2fc6656f65f8c2d3d009a5771392f48ebe1e24aa859028e1.jpg b/data/2025/2504_10xxx/2504.10478/images/4784db23bc7951ee2fc6656f65f8c2d3d009a5771392f48ebe1e24aa859028e1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd311066cb033d6f6332244dee71b951bfdfa9ff --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/4784db23bc7951ee2fc6656f65f8c2d3d009a5771392f48ebe1e24aa859028e1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f3df6d8ba0ee7f2020ab89d9d68d2dff1015583f8a1b9116736223b039b4070 +size 21857 diff --git a/data/2025/2504_10xxx/2504.10478/images/4970e2bbeb01a58d65034f7eee2d971620f82a124a24f05a15a0868fd8060784.jpg b/data/2025/2504_10xxx/2504.10478/images/4970e2bbeb01a58d65034f7eee2d971620f82a124a24f05a15a0868fd8060784.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a02c91ca7199985da8ad2f1c3d3810795416038 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/4970e2bbeb01a58d65034f7eee2d971620f82a124a24f05a15a0868fd8060784.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e6aae531c8d3b494c788654e5568b473e0310ffa924ff3605024b4ecb79468a +size 11846 diff --git a/data/2025/2504_10xxx/2504.10478/images/4c43efb60dc7fd63fa7e0bf6fe8153abec532160cb2af6bb071e080da392c5f2.jpg b/data/2025/2504_10xxx/2504.10478/images/4c43efb60dc7fd63fa7e0bf6fe8153abec532160cb2af6bb071e080da392c5f2.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..34d65917125e12c5309d695407dcd0520f06f225 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/4c43efb60dc7fd63fa7e0bf6fe8153abec532160cb2af6bb071e080da392c5f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bd0dbfaeb2e2f5e81f8dd0e543b8269bceab9e8e6f41efe809afd3dd85dca4d +size 13222 diff --git a/data/2025/2504_10xxx/2504.10478/images/4cca06e5c218704714c26e86a6ac995e438bcee795019154e74edbfe68091d0f.jpg b/data/2025/2504_10xxx/2504.10478/images/4cca06e5c218704714c26e86a6ac995e438bcee795019154e74edbfe68091d0f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62198dd3da2f63ce7b65fe2cfb8a56dd5e7789fd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/4cca06e5c218704714c26e86a6ac995e438bcee795019154e74edbfe68091d0f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02f2eee6c492bffdf892d7df2ee98c95b7eed9fae64aa2267766cfc5c4250948 +size 14405 diff --git a/data/2025/2504_10xxx/2504.10478/images/510bcdc6999edfdaf5ad51d42c4d837bb3ee069afcc24f8a26bd6ee5bc71c4e7.jpg b/data/2025/2504_10xxx/2504.10478/images/510bcdc6999edfdaf5ad51d42c4d837bb3ee069afcc24f8a26bd6ee5bc71c4e7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..50ceba08bb5cf7e21d0f11662d4bb75c14cc0191 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/510bcdc6999edfdaf5ad51d42c4d837bb3ee069afcc24f8a26bd6ee5bc71c4e7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5943078bd00acbc57379de86fb252dc3a78b65539d4dbf9f67019376c7c2484d +size 16865 diff --git a/data/2025/2504_10xxx/2504.10478/images/5137cb2f59693ab411059d8208f1a96ff60109df3970efcaaad046b30b11e30c.jpg b/data/2025/2504_10xxx/2504.10478/images/5137cb2f59693ab411059d8208f1a96ff60109df3970efcaaad046b30b11e30c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c53592a07e39c7c37c584cc56b9a27e6bddb5c4f --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10478/images/5137cb2f59693ab411059d8208f1a96ff60109df3970efcaaad046b30b11e30c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:725d14dc200925a9c1946c481b263eaba17537448a7de2159601bd6c21e3f42d +size 25851 diff --git a/data/2025/2504_10xxx/2504.10478/images/5186f02cf14107fb5f45cd06a48b8a84b8d148a57fc1f02fb24e57f2de6f0b2c.jpg b/data/2025/2504_10xxx/2504.10478/images/5186f02cf14107fb5f45cd06a48b8a84b8d148a57fc1f02fb24e57f2de6f0b2c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d2870444fd5eb4c8d2aa0cd5e641594a4f407746 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/5186f02cf14107fb5f45cd06a48b8a84b8d148a57fc1f02fb24e57f2de6f0b2c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dcc18e1ddfa383b70317a8d32cdaa4a110f641c80919f8aff8d1318240c7cd1c +size 14490 diff --git a/data/2025/2504_10xxx/2504.10478/images/525268009e6075c1325653ae3ac1f4d2d550342360d9da06d7710798d251f1d6.jpg b/data/2025/2504_10xxx/2504.10478/images/525268009e6075c1325653ae3ac1f4d2d550342360d9da06d7710798d251f1d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6d4ad926d9378d50a5cfba25fa69a93599b4f575 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/525268009e6075c1325653ae3ac1f4d2d550342360d9da06d7710798d251f1d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b05e0662cc3e67b9ae192e132e8d7c6382275ba09145fe196bb83bce49ac12b5 +size 8955 diff --git a/data/2025/2504_10xxx/2504.10478/images/528a40964d3bfda6c2aef41d322a1d87b86f248ba95412cafdaaf3724d5c8979.jpg b/data/2025/2504_10xxx/2504.10478/images/528a40964d3bfda6c2aef41d322a1d87b86f248ba95412cafdaaf3724d5c8979.jpg new file mode 100644 index 0000000000000000000000000000000000000000..33762222dcd7ba9d30f29197af0a99c3b7c8d14d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/528a40964d3bfda6c2aef41d322a1d87b86f248ba95412cafdaaf3724d5c8979.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:2181ab874abc2a302c60f7a4da68716e1905d6ad6f9eb7077ea5fdc25a6fc0d6 +size 17526 diff --git a/data/2025/2504_10xxx/2504.10478/images/534a1915e7237bbe8eb59cd3e2becaa163ccc90f8b40a09cb87b02ff2f834f83.jpg b/data/2025/2504_10xxx/2504.10478/images/534a1915e7237bbe8eb59cd3e2becaa163ccc90f8b40a09cb87b02ff2f834f83.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9eba8e370d95bdf0738c31c98361146a3ab39204 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/534a1915e7237bbe8eb59cd3e2becaa163ccc90f8b40a09cb87b02ff2f834f83.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01022496eeb0299d422d2de930d8a74c35072387d94e5e464e06ca501c38d900 +size 14239 diff --git a/data/2025/2504_10xxx/2504.10478/images/5389864675d1dd6b4715e8cae0d0e3a2a961b2c51f11fab71e30b0988c041768.jpg b/data/2025/2504_10xxx/2504.10478/images/5389864675d1dd6b4715e8cae0d0e3a2a961b2c51f11fab71e30b0988c041768.jpg new file mode 100644 index 0000000000000000000000000000000000000000..59f9a8c83880554af59c6d2d24b15b0f59ca6736 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/5389864675d1dd6b4715e8cae0d0e3a2a961b2c51f11fab71e30b0988c041768.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc4cd1c72fb89a129aa2e205291da3af8106b8dc0de62a73ca67e146fae719cb +size 6715 diff --git a/data/2025/2504_10xxx/2504.10478/images/546890eae7b6307835210a35ac4546692989e395b2041aa92121b17022a37557.jpg b/data/2025/2504_10xxx/2504.10478/images/546890eae7b6307835210a35ac4546692989e395b2041aa92121b17022a37557.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b50fc07cefb6a667bc9150eef2bc5be9c537c82 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/546890eae7b6307835210a35ac4546692989e395b2041aa92121b17022a37557.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48fa6b31407bc56056e2059b47fbab7a9cf8116adc0a41299b9b8917d2abcddb +size 25281 diff --git 
a/data/2025/2504_10xxx/2504.10478/images/55bfba616e05bf812288c69d8876b6ddee39b2e5e60a5aefda879cfecbd5feea.jpg b/data/2025/2504_10xxx/2504.10478/images/55bfba616e05bf812288c69d8876b6ddee39b2e5e60a5aefda879cfecbd5feea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ad32092c18e07bedf3d43d48ce5ca5316d139dc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/55bfba616e05bf812288c69d8876b6ddee39b2e5e60a5aefda879cfecbd5feea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6647eac2730fd66b3269b97709b528c8d2d818726624647db4e8cf3e6e9e0b8b +size 6537 diff --git a/data/2025/2504_10xxx/2504.10478/images/5667e64bdc969d25573bf150b13c2046ca2168b091045a68f3bfe49c56c2ea9e.jpg b/data/2025/2504_10xxx/2504.10478/images/5667e64bdc969d25573bf150b13c2046ca2168b091045a68f3bfe49c56c2ea9e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b57144d92ad9946a58a1185211931e9396054f60 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/5667e64bdc969d25573bf150b13c2046ca2168b091045a68f3bfe49c56c2ea9e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5265b49fa29eb1fe9863efcf128f25e37d5c155c61269f9eb893e7b5ed83702 +size 10322 diff --git a/data/2025/2504_10xxx/2504.10478/images/568239dfe0d053038302fc7fbde2c6756bc3312c7e80cec28090883679638505.jpg b/data/2025/2504_10xxx/2504.10478/images/568239dfe0d053038302fc7fbde2c6756bc3312c7e80cec28090883679638505.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ed0b1a26d6786b700349a9782d1e4ed6c3ccc6b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/568239dfe0d053038302fc7fbde2c6756bc3312c7e80cec28090883679638505.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7582f06d82c61e3fe1ed542b808a4baf1acfd71c39b0e31cfeb21ae0673e277 +size 7596 diff --git a/data/2025/2504_10xxx/2504.10478/images/572c6f391423923f00eb57fedd5c685d498bf084b4ad7b1a487a5c5595d86909.jpg 
b/data/2025/2504_10xxx/2504.10478/images/572c6f391423923f00eb57fedd5c685d498bf084b4ad7b1a487a5c5595d86909.jpg new file mode 100644 index 0000000000000000000000000000000000000000..799a1a41d7e0ee5ddda18997db7046db0500ead1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/572c6f391423923f00eb57fedd5c685d498bf084b4ad7b1a487a5c5595d86909.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c38ceeaa643c1deba49810b558fd120dbd7482bb46fcdb9d9ee5f954a346ad84 +size 4605 diff --git a/data/2025/2504_10xxx/2504.10478/images/5a57a6efe0c941d086713a9c86373744f480001ca15dc55fec49bde54b75585e.jpg b/data/2025/2504_10xxx/2504.10478/images/5a57a6efe0c941d086713a9c86373744f480001ca15dc55fec49bde54b75585e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47f3b05e8f7c160bbe219ee210ea528578c7178f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/5a57a6efe0c941d086713a9c86373744f480001ca15dc55fec49bde54b75585e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:098150ccf1ffaf3cb7f699dbacc1798f0f5b8cee520e3fbbda2d22726c33af44 +size 9320 diff --git a/data/2025/2504_10xxx/2504.10478/images/5aa59d6e2aba258b1051ce25b0904b17cd2e7490d5e852f24f61c5460a902111.jpg b/data/2025/2504_10xxx/2504.10478/images/5aa59d6e2aba258b1051ce25b0904b17cd2e7490d5e852f24f61c5460a902111.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e4a6f271ae89c8718d7f71f6b3fe2b5a793a9a25 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/5aa59d6e2aba258b1051ce25b0904b17cd2e7490d5e852f24f61c5460a902111.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:252162e6e09e9791e177b0218229cabf61432367cd8137f2429b511a053d937a +size 15746 diff --git a/data/2025/2504_10xxx/2504.10478/images/5c78239b846481b7bf9c97f9b381676efebb712f06044215ffc0828a1d3181dd.jpg b/data/2025/2504_10xxx/2504.10478/images/5c78239b846481b7bf9c97f9b381676efebb712f06044215ffc0828a1d3181dd.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..dea63c240617048b09c3b0d35ec44d6259654a50 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/5c78239b846481b7bf9c97f9b381676efebb712f06044215ffc0828a1d3181dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6ae34b8dd5eb2509a4582babd23cdd99593ac9b4a1c2cdf082a4208480dfe19 +size 47548 diff --git a/data/2025/2504_10xxx/2504.10478/images/5cc7d3e6e8dc1b3da5bcfeaac5e13c685f56aae0ab567a53aa222a84743494fd.jpg b/data/2025/2504_10xxx/2504.10478/images/5cc7d3e6e8dc1b3da5bcfeaac5e13c685f56aae0ab567a53aa222a84743494fd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7fc0c27f3ad83b96439de2311650101aeb590b52 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/5cc7d3e6e8dc1b3da5bcfeaac5e13c685f56aae0ab567a53aa222a84743494fd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7a008b5b85a2ac28e2042a76c233801f9dcaf7924ecd474152a2f2a4c19e9d6 +size 10393 diff --git a/data/2025/2504_10xxx/2504.10478/images/5dcada9b466ab9fe7f31300e240f281a0a980bf61c1e1d8185ad399f27eb0286.jpg b/data/2025/2504_10xxx/2504.10478/images/5dcada9b466ab9fe7f31300e240f281a0a980bf61c1e1d8185ad399f27eb0286.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0dd233a1577a3bf83de537d09fe430808eee565 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/5dcada9b466ab9fe7f31300e240f281a0a980bf61c1e1d8185ad399f27eb0286.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:813181322177b31d8f11c660ce3a38de7ca1f94278578485a8ef1bdf53704750 +size 8082 diff --git a/data/2025/2504_10xxx/2504.10478/images/60a8b9a9af69b7127d68729341fe2361371efa2b3e4bacb8eec3f42fc9ec84a0.jpg b/data/2025/2504_10xxx/2504.10478/images/60a8b9a9af69b7127d68729341fe2361371efa2b3e4bacb8eec3f42fc9ec84a0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d056d879678d78f0ffb1be8a30de602b9d67b54b --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10478/images/60a8b9a9af69b7127d68729341fe2361371efa2b3e4bacb8eec3f42fc9ec84a0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d11260bee77f9099ff1fe72ee5095119bf721d981df6761f9aef5fb438134475 +size 11226 diff --git a/data/2025/2504_10xxx/2504.10478/images/617a18e5724e8faa2af11981ddb7e906d78aa4383d6f8ef809a5eba43aade27f.jpg b/data/2025/2504_10xxx/2504.10478/images/617a18e5724e8faa2af11981ddb7e906d78aa4383d6f8ef809a5eba43aade27f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0946577b698d684fa85ae55f128ab450197b0435 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/617a18e5724e8faa2af11981ddb7e906d78aa4383d6f8ef809a5eba43aade27f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4923d2373bc00430df7892e4914cd68f2b5eaf5cc8143c9214d525334dc8771e +size 4952 diff --git a/data/2025/2504_10xxx/2504.10478/images/61852e157d7ade30f6e2c0703696bface8d53ae9ba6a912c736f52927c4780a8.jpg b/data/2025/2504_10xxx/2504.10478/images/61852e157d7ade30f6e2c0703696bface8d53ae9ba6a912c736f52927c4780a8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3cf4d37e815fbad4c74ca4b4d474699f0a72da51 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/61852e157d7ade30f6e2c0703696bface8d53ae9ba6a912c736f52927c4780a8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d37b43884a8c2873164a229175d6d44ebe44e883fff24f98cf1a722b683c0a74 +size 9711 diff --git a/data/2025/2504_10xxx/2504.10478/images/62c0999f68f4b3f6b5180018a04cacb5187ec510a244fb389d70096c3972f24c.jpg b/data/2025/2504_10xxx/2504.10478/images/62c0999f68f4b3f6b5180018a04cacb5187ec510a244fb389d70096c3972f24c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e9b47e51529226fc03f2d9f2f294a5ab218e4293 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/62c0999f68f4b3f6b5180018a04cacb5187ec510a244fb389d70096c3972f24c.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:9087b46c424be1c4de86e0fcc357e17629d7ae81023340a071fe7948629f0cd9 +size 5230 diff --git a/data/2025/2504_10xxx/2504.10478/images/6357c2ac829597a5cabbd3f6f2c13751aeaac97b2934f7e1144f19fda33b7246.jpg b/data/2025/2504_10xxx/2504.10478/images/6357c2ac829597a5cabbd3f6f2c13751aeaac97b2934f7e1144f19fda33b7246.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46452a263682ff5fc7cef5bac3faf98b5aa68089 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/6357c2ac829597a5cabbd3f6f2c13751aeaac97b2934f7e1144f19fda33b7246.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2554d911dc071cafb295880ce5a9e545c872928fc3597716ee7ea57461345edb +size 14729 diff --git a/data/2025/2504_10xxx/2504.10478/images/6415bf26abc61b3f3704f1e93f285ad0c1d640bbb79a1978e63f250e5f01c217.jpg b/data/2025/2504_10xxx/2504.10478/images/6415bf26abc61b3f3704f1e93f285ad0c1d640bbb79a1978e63f250e5f01c217.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ccc5228772da6db979cda81313c7f44d649e1975 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/6415bf26abc61b3f3704f1e93f285ad0c1d640bbb79a1978e63f250e5f01c217.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eee9c31f872461efd6df8e1de599d6b7f13aa920e7c85c2e85f992f3ae10d1fa +size 15875 diff --git a/data/2025/2504_10xxx/2504.10478/images/6505c814dcb35aa9757d8be7050bd89319b4a0aae6db8d965dfe1bf81985e105.jpg b/data/2025/2504_10xxx/2504.10478/images/6505c814dcb35aa9757d8be7050bd89319b4a0aae6db8d965dfe1bf81985e105.jpg new file mode 100644 index 0000000000000000000000000000000000000000..086e188ea4bd73fba81e405c7b41db12a68ed149 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/6505c814dcb35aa9757d8be7050bd89319b4a0aae6db8d965dfe1bf81985e105.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:290f3fcd6f6694cfd40da35c1b249a44768a6e3f441479c74e8da74bdf14ede8 +size 14862 diff --git 
a/data/2025/2504_10xxx/2504.10478/images/6583cb61c58420a22e2b481d3ecd6ab5badcb2086aeae20b3945609becb32131.jpg b/data/2025/2504_10xxx/2504.10478/images/6583cb61c58420a22e2b481d3ecd6ab5badcb2086aeae20b3945609becb32131.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bed23eb6a5082d44a92bee865dba5b230a2734e0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/6583cb61c58420a22e2b481d3ecd6ab5badcb2086aeae20b3945609becb32131.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb78cfde9f2595bb529edacea8047e29dc6b6adbf3e0348b82e5a3a710264e95 +size 13075 diff --git a/data/2025/2504_10xxx/2504.10478/images/66ac2568e0176bd956a7f1d1f5e7aed9e9fba0733ce10c464bbf875e9c49fbad.jpg b/data/2025/2504_10xxx/2504.10478/images/66ac2568e0176bd956a7f1d1f5e7aed9e9fba0733ce10c464bbf875e9c49fbad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a727c9286552b5540a623c073dc331b998be22a2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/66ac2568e0176bd956a7f1d1f5e7aed9e9fba0733ce10c464bbf875e9c49fbad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c6d41dce0dcd0e320ad22705804d49ae2adf6eb390ceb0a8f5c099636beb48c +size 3045 diff --git a/data/2025/2504_10xxx/2504.10478/images/672c0c412effe3f158ea40afc7de6980e89697915f6ff25df8aa79757cb6e4a2.jpg b/data/2025/2504_10xxx/2504.10478/images/672c0c412effe3f158ea40afc7de6980e89697915f6ff25df8aa79757cb6e4a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..01ed5aebbb2da76d2593a16af31a771e2b478916 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/672c0c412effe3f158ea40afc7de6980e89697915f6ff25df8aa79757cb6e4a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3eef479b6f9204d81393f26ff2fec3bd4de913e22c77f4f902d47f29c9ecc9b0 +size 9065 diff --git a/data/2025/2504_10xxx/2504.10478/images/6879e45cb82d6e4e3cb71ac588c9076b39549c4f56feda977179787fe237fef8.jpg 
b/data/2025/2504_10xxx/2504.10478/images/6879e45cb82d6e4e3cb71ac588c9076b39549c4f56feda977179787fe237fef8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..79a00debb7789bcfb298df4fe9f7166ad38d2e1d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/6879e45cb82d6e4e3cb71ac588c9076b39549c4f56feda977179787fe237fef8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a9ad915d253d33d51f74d859284792f94eba0c2ae5454dc5b13dd2ba37072b2 +size 14354 diff --git a/data/2025/2504_10xxx/2504.10478/images/68f30ce20b9b540bd97d34e0e8afd1dd50e712a9365bf753e3341f92825fd290.jpg b/data/2025/2504_10xxx/2504.10478/images/68f30ce20b9b540bd97d34e0e8afd1dd50e712a9365bf753e3341f92825fd290.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0d6588a31b8efcbb3a98fae3604f3ce0ee08a1b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/68f30ce20b9b540bd97d34e0e8afd1dd50e712a9365bf753e3341f92825fd290.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbb2f515ee82e94351bd686e49e002cfdef160d94d3e97f69dfd9a88123485d0 +size 32418 diff --git a/data/2025/2504_10xxx/2504.10478/images/69f407d3727977dd3ff9ce3bea90b6e7f5e959bfd3caf47043b8ae66bc1639f6.jpg b/data/2025/2504_10xxx/2504.10478/images/69f407d3727977dd3ff9ce3bea90b6e7f5e959bfd3caf47043b8ae66bc1639f6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ed709d12b0fc72a05eb87df837626f7f7334d710 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/69f407d3727977dd3ff9ce3bea90b6e7f5e959bfd3caf47043b8ae66bc1639f6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9451596f45643add1aeaf1cb945cbd7fa1ec609ab65c7067b71d11628dcb7986 +size 11098 diff --git a/data/2025/2504_10xxx/2504.10478/images/6b301ba7c226d983f2678b214edd7278f1cfb4320bcae13b40fbe1456b86da77.jpg b/data/2025/2504_10xxx/2504.10478/images/6b301ba7c226d983f2678b214edd7278f1cfb4320bcae13b40fbe1456b86da77.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..7a3dd002a31930be345144b05379c50539c44042 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/6b301ba7c226d983f2678b214edd7278f1cfb4320bcae13b40fbe1456b86da77.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f739664ac10940136bd2a6224ea34bc2b056184d867c886684611966bc57455 +size 11675 diff --git a/data/2025/2504_10xxx/2504.10478/images/6c18e77dd874f11856943ca580d37e31d9687c5a7be4bc53d7a94208e6f4079a.jpg b/data/2025/2504_10xxx/2504.10478/images/6c18e77dd874f11856943ca580d37e31d9687c5a7be4bc53d7a94208e6f4079a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82c0e58869f58364278b7c7ebcf640b02779ebf1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/6c18e77dd874f11856943ca580d37e31d9687c5a7be4bc53d7a94208e6f4079a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22a66693c9c238d2626c998600476f1056463634cbc6714043b9e801e29aaf5e +size 13624 diff --git a/data/2025/2504_10xxx/2504.10478/images/6c5fa0e260a30825c41c9e0eaa75949a07f90449475e4e78fc1c1a49f05915b4.jpg b/data/2025/2504_10xxx/2504.10478/images/6c5fa0e260a30825c41c9e0eaa75949a07f90449475e4e78fc1c1a49f05915b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82879587d9db18ab17f7d5ea772d74eb9331d42d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/6c5fa0e260a30825c41c9e0eaa75949a07f90449475e4e78fc1c1a49f05915b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e70064c55b7e5aec6836c9a5a5b876fd68cb0c82870cc0c8c4f5d1345569ab3f +size 26953 diff --git a/data/2025/2504_10xxx/2504.10478/images/6f01ccaf5c53f26ffda8ffe2a095592c21beb991d051e6b3ecc692744b009663.jpg b/data/2025/2504_10xxx/2504.10478/images/6f01ccaf5c53f26ffda8ffe2a095592c21beb991d051e6b3ecc692744b009663.jpg new file mode 100644 index 0000000000000000000000000000000000000000..39940e7b22ecf502b037567c33e0fb7b512011d0 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10478/images/6f01ccaf5c53f26ffda8ffe2a095592c21beb991d051e6b3ecc692744b009663.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab03317e4e43e7580525f8572f85d9129616626b7a318ba5fb06d784cacf316c +size 13207 diff --git a/data/2025/2504_10xxx/2504.10478/images/72f39f593256e825aa89800656acb5de75dd73f0b003c3ffe78459ddb1a8bb19.jpg b/data/2025/2504_10xxx/2504.10478/images/72f39f593256e825aa89800656acb5de75dd73f0b003c3ffe78459ddb1a8bb19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cc663aa203e729187cfadc1f7973a26ad7fa890d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/72f39f593256e825aa89800656acb5de75dd73f0b003c3ffe78459ddb1a8bb19.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f4cdaf8c42eb89e630e77f951455db6a6a03993d6052111b3add7f7b02bde3a +size 4993 diff --git a/data/2025/2504_10xxx/2504.10478/images/73d34b55c39b755f68e6950c8eebdf29d21cb222617fda1fe97a55a0270a9208.jpg b/data/2025/2504_10xxx/2504.10478/images/73d34b55c39b755f68e6950c8eebdf29d21cb222617fda1fe97a55a0270a9208.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31da8ec4d3ee67819f2c627a38ff7f2e71e5d27f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/73d34b55c39b755f68e6950c8eebdf29d21cb222617fda1fe97a55a0270a9208.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44cf63bda35c5347be0d630d48b93656aef10b4de9ef83c5e1f3587472349f00 +size 13934 diff --git a/data/2025/2504_10xxx/2504.10478/images/746a4c2b336ab6d80695d60f5f6112ea89a75779153cb6d50e8f7c5219e462ab.jpg b/data/2025/2504_10xxx/2504.10478/images/746a4c2b336ab6d80695d60f5f6112ea89a75779153cb6d50e8f7c5219e462ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8e1a1314e6b8631cd5345b257dec0620c490752 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/746a4c2b336ab6d80695d60f5f6112ea89a75779153cb6d50e8f7c5219e462ab.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:bb78bd95396fa71b18cd4bd0d42246548377a1ab504f56fb98018d5b90b8413d +size 18484 diff --git a/data/2025/2504_10xxx/2504.10478/images/75219cfb4f883bfd007fe962ddcd413837dbf356a779a36c396e3403fd88d124.jpg b/data/2025/2504_10xxx/2504.10478/images/75219cfb4f883bfd007fe962ddcd413837dbf356a779a36c396e3403fd88d124.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a2f3f70bbb81270a178a094c23122821f43a2a2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/75219cfb4f883bfd007fe962ddcd413837dbf356a779a36c396e3403fd88d124.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d19d37a10b62f5144c7b67c2050303afc450dc3b6ccf8da8bde5e6ce92567216 +size 5009 diff --git a/data/2025/2504_10xxx/2504.10478/images/755487d49278fc8f427c89062e4764a3dbed733574cc63f87205903fb44ea7e0.jpg b/data/2025/2504_10xxx/2504.10478/images/755487d49278fc8f427c89062e4764a3dbed733574cc63f87205903fb44ea7e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..456337a6228d8d55617a4c71c559fc742570c8fb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/755487d49278fc8f427c89062e4764a3dbed733574cc63f87205903fb44ea7e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a53c6fee2f2a571a629b9ae2ca365ff2559405ac56dcbe06f088245a0a368b3e +size 5781 diff --git a/data/2025/2504_10xxx/2504.10478/images/76912c1563647074f02f5a01f22b64a0f1b55b9425bc2f039804f293c31af747.jpg b/data/2025/2504_10xxx/2504.10478/images/76912c1563647074f02f5a01f22b64a0f1b55b9425bc2f039804f293c31af747.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2292d47c898dae7fcda091539027b4ef28f733fa --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/76912c1563647074f02f5a01f22b64a0f1b55b9425bc2f039804f293c31af747.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5b13cc268008bdd0e3c346dcaa4b2699aa12c7bd986a54ae274c5c62f1660ca +size 12143 diff --git 
a/data/2025/2504_10xxx/2504.10478/images/76cd3796b40caac6ff8a02aec6cb5d704946a539355c961c1da0cde04fcc0f3b.jpg b/data/2025/2504_10xxx/2504.10478/images/76cd3796b40caac6ff8a02aec6cb5d704946a539355c961c1da0cde04fcc0f3b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..95066780a4bc33c303d329879f2a6e3a294a645f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/76cd3796b40caac6ff8a02aec6cb5d704946a539355c961c1da0cde04fcc0f3b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad87edb4dc7744af299ab1526e4fad68fdb2d59db722e86c3ad2f8424ba46da4 +size 13106 diff --git a/data/2025/2504_10xxx/2504.10478/images/7ad9bd5e57c1c23dbd6a2fe73d568d72272de9cf3ff807b8a8da06b7e3ec8421.jpg b/data/2025/2504_10xxx/2504.10478/images/7ad9bd5e57c1c23dbd6a2fe73d568d72272de9cf3ff807b8a8da06b7e3ec8421.jpg new file mode 100644 index 0000000000000000000000000000000000000000..174e0823fa2c5671fa0e1d887dd0f0d0c7650466 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/7ad9bd5e57c1c23dbd6a2fe73d568d72272de9cf3ff807b8a8da06b7e3ec8421.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63f053042c236de5583de831b60cffeeebc1a09815a779e500ccde25003cd2cd +size 13916 diff --git a/data/2025/2504_10xxx/2504.10478/images/7c463a6b643c401de38db5cb6513d86e528960fd75d924839fc2a365a4fe7d82.jpg b/data/2025/2504_10xxx/2504.10478/images/7c463a6b643c401de38db5cb6513d86e528960fd75d924839fc2a365a4fe7d82.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f328fb04dbe091e5e178c20e3cb35739f362c956 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/7c463a6b643c401de38db5cb6513d86e528960fd75d924839fc2a365a4fe7d82.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32b89864b824725e5e6a1705605f7d614fb8ee46a286cc0e6c1d85e1c610ad43 +size 14538 diff --git a/data/2025/2504_10xxx/2504.10478/images/7c82543e967ea6f45c2bf933a82bf1272684715fdd543972676d409e39687a79.jpg 
b/data/2025/2504_10xxx/2504.10478/images/7c82543e967ea6f45c2bf933a82bf1272684715fdd543972676d409e39687a79.jpg new file mode 100644 index 0000000000000000000000000000000000000000..240418fe14a7174a1d0abc420df885e27c95d9df --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/7c82543e967ea6f45c2bf933a82bf1272684715fdd543972676d409e39687a79.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22169d5284481639481b7f96de7495aa95b84785b51e82b147830991724c6e22 +size 10212 diff --git a/data/2025/2504_10xxx/2504.10478/images/7cfd0f10650b5e5d393aae358e8073bb53dac9ce247d2aced5c9cee49e6ed744.jpg b/data/2025/2504_10xxx/2504.10478/images/7cfd0f10650b5e5d393aae358e8073bb53dac9ce247d2aced5c9cee49e6ed744.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cd313cb4b9461e50bf7732aeb1735d6a9eb9b005 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/7cfd0f10650b5e5d393aae358e8073bb53dac9ce247d2aced5c9cee49e6ed744.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c98d6ccf973c1938f1bbdc153c7ffcbbe75894fb4fd49549fcddcfd8bcb38a0c +size 7992 diff --git a/data/2025/2504_10xxx/2504.10478/images/7e11ff93e0b785d9953e3d89bcd12390815907a6dc2f3d35716009e51d01b2a9.jpg b/data/2025/2504_10xxx/2504.10478/images/7e11ff93e0b785d9953e3d89bcd12390815907a6dc2f3d35716009e51d01b2a9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..606e33da356a9d466e76bc45cdb8a638f1e1e0ce --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/7e11ff93e0b785d9953e3d89bcd12390815907a6dc2f3d35716009e51d01b2a9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7eaf344f6e181f901976615d702742e6dd856e53e48c81fbed2636809b82ed8c +size 14513 diff --git a/data/2025/2504_10xxx/2504.10478/images/814e37e4a8cfad542f088af46bbd948f458f6686fdea387a041191635dae743d.jpg b/data/2025/2504_10xxx/2504.10478/images/814e37e4a8cfad542f088af46bbd948f458f6686fdea387a041191635dae743d.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..b0799e7c3b80e36e7e0ae566308fa8c56314ae34 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/814e37e4a8cfad542f088af46bbd948f458f6686fdea387a041191635dae743d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eafae8869813a6b495b36b7dae8aecab812a513605fe5783a68731852ec8e0a9 +size 20232 diff --git a/data/2025/2504_10xxx/2504.10478/images/81519a54b17fc6c666684e274696d0ac2127e9fe4ff88d0d9326a87c6149a208.jpg b/data/2025/2504_10xxx/2504.10478/images/81519a54b17fc6c666684e274696d0ac2127e9fe4ff88d0d9326a87c6149a208.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04185b4258ae50b375f57a40f142baea541ee68c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/81519a54b17fc6c666684e274696d0ac2127e9fe4ff88d0d9326a87c6149a208.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5826cadc3ea321b56bc80fbed30cab63997c8442b7ae716f19806d20cee87b9b +size 14942 diff --git a/data/2025/2504_10xxx/2504.10478/images/860ad658417f481695762b3985aa86cc6c8648a27a1974383b58f81738a321ca.jpg b/data/2025/2504_10xxx/2504.10478/images/860ad658417f481695762b3985aa86cc6c8648a27a1974383b58f81738a321ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe70453f032414ec24ec17691aa5af582408d206 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/860ad658417f481695762b3985aa86cc6c8648a27a1974383b58f81738a321ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2bbad94867018a577c94eb81dd3c2b25fbb48abfd00f6471f3a5f310268494a +size 17683 diff --git a/data/2025/2504_10xxx/2504.10478/images/88a2aea4db6f0ceb07a3f515c45521b42aef420591197fc4ac31c9e750a6eb8c.jpg b/data/2025/2504_10xxx/2504.10478/images/88a2aea4db6f0ceb07a3f515c45521b42aef420591197fc4ac31c9e750a6eb8c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3be13b9a500c96b521406e861b71d6cb16c09bd8 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10478/images/88a2aea4db6f0ceb07a3f515c45521b42aef420591197fc4ac31c9e750a6eb8c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e36a2335b45ce79284ba60aa860923bf1c8b5a8099b952703a8ebbafe3ea18eb +size 13955 diff --git a/data/2025/2504_10xxx/2504.10478/images/88f304b72e16859b9ddb0909f86dcbdda34b432dc64f59e1b583d5f961c70d85.jpg b/data/2025/2504_10xxx/2504.10478/images/88f304b72e16859b9ddb0909f86dcbdda34b432dc64f59e1b583d5f961c70d85.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68bc9e81a2f6d054840ea374d9c5ddd79c4e7c8b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/88f304b72e16859b9ddb0909f86dcbdda34b432dc64f59e1b583d5f961c70d85.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:307b1ed3a5005bd80ec8af1001debdba0f16cead1276b17a8b817644120f5a91 +size 8312 diff --git a/data/2025/2504_10xxx/2504.10478/images/8a3d27f0b455df3c98d77c12e7837d76fbf9b29395a7db957519d3cc75142e50.jpg b/data/2025/2504_10xxx/2504.10478/images/8a3d27f0b455df3c98d77c12e7837d76fbf9b29395a7db957519d3cc75142e50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f7f15f4d13687697080638224d78438d61ce7132 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/8a3d27f0b455df3c98d77c12e7837d76fbf9b29395a7db957519d3cc75142e50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f96dc7f05652821c9037960e916dc6e442b5328e326d6087bb365e334eb44d9 +size 15022 diff --git a/data/2025/2504_10xxx/2504.10478/images/8ab320b8b9b148e7007304b25bcea6941eee571cdcfb9f2bcb270cc915f41d2e.jpg b/data/2025/2504_10xxx/2504.10478/images/8ab320b8b9b148e7007304b25bcea6941eee571cdcfb9f2bcb270cc915f41d2e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..33914b873d4c57b49abb006f9fd906da0a17955d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/8ab320b8b9b148e7007304b25bcea6941eee571cdcfb9f2bcb270cc915f41d2e.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:04eb719fdcfbf169625e1aac1505f4417ba8ace94287b309fdc7bd51cbee9935 +size 14111 diff --git a/data/2025/2504_10xxx/2504.10478/images/8ce829c977aacce11e66df276705eb378c12107b1a887e9b736fc98c415e6ed5.jpg b/data/2025/2504_10xxx/2504.10478/images/8ce829c977aacce11e66df276705eb378c12107b1a887e9b736fc98c415e6ed5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b5428256322f8f24ef718b5ee481d7d9eb7bee0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/8ce829c977aacce11e66df276705eb378c12107b1a887e9b736fc98c415e6ed5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ece1f53342b71f104f67dd371726e0c019f512d5857560af40989b30700e17bd +size 8373 diff --git a/data/2025/2504_10xxx/2504.10478/images/9314a20b371980d52ecbb3503a73f93a81b6bb2a673d0623309bc41fbb73253e.jpg b/data/2025/2504_10xxx/2504.10478/images/9314a20b371980d52ecbb3503a73f93a81b6bb2a673d0623309bc41fbb73253e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e996da37bcce36ff80cabb28390db2eba5eee293 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/9314a20b371980d52ecbb3503a73f93a81b6bb2a673d0623309bc41fbb73253e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b87c62771769be1309cfb0dfad23413429d7b47b649f4741744ac98200f2916e +size 9838 diff --git a/data/2025/2504_10xxx/2504.10478/images/946d993abc5cb2f9296711897d897572593e58f08517be74488fe99906faa457.jpg b/data/2025/2504_10xxx/2504.10478/images/946d993abc5cb2f9296711897d897572593e58f08517be74488fe99906faa457.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0411cf63096286eec6390868571eab2391a29c25 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/946d993abc5cb2f9296711897d897572593e58f08517be74488fe99906faa457.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de101d99ac57ab248715f7617bc6850471ff09f7a88feea71483b0d9549b7589 +size 13214 diff --git 
a/data/2025/2504_10xxx/2504.10478/images/973273b93ff0af969d810af2019c0e62c8240895ed9620fb5e5ac263b61fb546.jpg b/data/2025/2504_10xxx/2504.10478/images/973273b93ff0af969d810af2019c0e62c8240895ed9620fb5e5ac263b61fb546.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a4898e1380ebf15a7673a9b5a7f1d62756f805a0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/973273b93ff0af969d810af2019c0e62c8240895ed9620fb5e5ac263b61fb546.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7484b057f63c6929bb8fe9f06cd1a9785b0c2cbe098eda5871a344a562a61750 +size 13897 diff --git a/data/2025/2504_10xxx/2504.10478/images/97906774fe3390f2c8dce9d365178b77721d1991265e137bf86ad95237738532.jpg b/data/2025/2504_10xxx/2504.10478/images/97906774fe3390f2c8dce9d365178b77721d1991265e137bf86ad95237738532.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5965c4b647b9d6f5cb19b80427d6a71e96a383d5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/97906774fe3390f2c8dce9d365178b77721d1991265e137bf86ad95237738532.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef4331f3a403c1d1067d00ac0e0fd52695d20a40ef32b3fcb6e70f7eb3d393e6 +size 12091 diff --git a/data/2025/2504_10xxx/2504.10478/images/97b56b7e63b48c137ae6e876983d6e3b63f26d408263648812f7955049209453.jpg b/data/2025/2504_10xxx/2504.10478/images/97b56b7e63b48c137ae6e876983d6e3b63f26d408263648812f7955049209453.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b50a8e9beaab58ab61a8146b1ce9070ff01f3a1a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/97b56b7e63b48c137ae6e876983d6e3b63f26d408263648812f7955049209453.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b8b7086570bfe9bb48ed4a1dcb80f225a6f9a036cb8e7a6fb46a7eb2a30e8a9 +size 14434 diff --git a/data/2025/2504_10xxx/2504.10478/images/9904b72553ca60a37dd41207977ebf59630a4c6ab09f09ed468ec2b71b8662e2.jpg 
b/data/2025/2504_10xxx/2504.10478/images/9904b72553ca60a37dd41207977ebf59630a4c6ab09f09ed468ec2b71b8662e2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a6794f33062035ca5f3757591a36e5d499884f8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/9904b72553ca60a37dd41207977ebf59630a4c6ab09f09ed468ec2b71b8662e2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36c774ced24c09605cdfcbefeb9b16e2356612868d161ed41e94bbbe0ffc1198 +size 5450 diff --git a/data/2025/2504_10xxx/2504.10478/images/9a00ddd660bf8f8eeda9cd85892cb8a7e3465e904ab2b4e6cf073e2f5f617379.jpg b/data/2025/2504_10xxx/2504.10478/images/9a00ddd660bf8f8eeda9cd85892cb8a7e3465e904ab2b4e6cf073e2f5f617379.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c1f8ff0f98e3fb70d592ee38e163ede6f9e1f65a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/9a00ddd660bf8f8eeda9cd85892cb8a7e3465e904ab2b4e6cf073e2f5f617379.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e74a334f3b4227901e890dac0e06308ffab1693a703fbf9d4179507bf0362dbd +size 19708 diff --git a/data/2025/2504_10xxx/2504.10478/images/9b0b5f9803b94a0b5f4b2b333482b5acab14a7b4396c642f98d4c63f944bbd16.jpg b/data/2025/2504_10xxx/2504.10478/images/9b0b5f9803b94a0b5f4b2b333482b5acab14a7b4396c642f98d4c63f944bbd16.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e51d871ad2171f8938f2903d0a07ed4467e68aed --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/9b0b5f9803b94a0b5f4b2b333482b5acab14a7b4396c642f98d4c63f944bbd16.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:865c34c12c2cd01056baa987e58387e45c30475553fbe9bf7998c8e11413cc28 +size 12864 diff --git a/data/2025/2504_10xxx/2504.10478/images/9dc1172db134cef8ae1e854cc6f68ebfe8e6c96aaeacb27c5b0c870ad9752a67.jpg b/data/2025/2504_10xxx/2504.10478/images/9dc1172db134cef8ae1e854cc6f68ebfe8e6c96aaeacb27c5b0c870ad9752a67.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..e08a1f411f2334f118deec7a8e6d4771d00595d9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/9dc1172db134cef8ae1e854cc6f68ebfe8e6c96aaeacb27c5b0c870ad9752a67.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf3f2550c67b52ae9869a3b4c41ad163b4e59500d61e70ceb8b58be665d73dcc +size 11008 diff --git a/data/2025/2504_10xxx/2504.10478/images/9e3fdc034bc19b8587b4804417cfbed97b363f7d3658230fe4584772068195a9.jpg b/data/2025/2504_10xxx/2504.10478/images/9e3fdc034bc19b8587b4804417cfbed97b363f7d3658230fe4584772068195a9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..71c9049ae8dfbac634cbcfcf04e3fb3df63a61f4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/9e3fdc034bc19b8587b4804417cfbed97b363f7d3658230fe4584772068195a9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c809a37ab88e937a5d27fd100c81ecd6be9c1a6d5fc77b5698bce9e30bb1d6a +size 9861 diff --git a/data/2025/2504_10xxx/2504.10478/images/9e6d23e6964203140d9a54ab2c01c778ff9fc0a06702b556276de27f651dd276.jpg b/data/2025/2504_10xxx/2504.10478/images/9e6d23e6964203140d9a54ab2c01c778ff9fc0a06702b556276de27f651dd276.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f371a331b6876c4fe10ee58132ac8f382af7d60d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/9e6d23e6964203140d9a54ab2c01c778ff9fc0a06702b556276de27f651dd276.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5e3f96916d945cc011f48d2734d862cd9ba110b2ed2d20f5b99105bcee5d3e3 +size 17686 diff --git a/data/2025/2504_10xxx/2504.10478/images/9edcca763df460a84257bc718bc153f2f8f5a4c6a79bbf78ad4b859738b6d86e.jpg b/data/2025/2504_10xxx/2504.10478/images/9edcca763df460a84257bc718bc153f2f8f5a4c6a79bbf78ad4b859738b6d86e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e9652737f4c81d9f39cfa53c796a2980f41b99cb --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10478/images/9edcca763df460a84257bc718bc153f2f8f5a4c6a79bbf78ad4b859738b6d86e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdaf769c94aa221336f1c127a35ecc2bbb4afd12fb60a9b9bfb6d403d7b84225 +size 13180 diff --git a/data/2025/2504_10xxx/2504.10478/images/9ef913fae226c6c9e5f5118cb8299e0362f5d447174e6da0d58120967b130043.jpg b/data/2025/2504_10xxx/2504.10478/images/9ef913fae226c6c9e5f5118cb8299e0362f5d447174e6da0d58120967b130043.jpg new file mode 100644 index 0000000000000000000000000000000000000000..435e169c179f8d145730dd6bb7edeeba1f374b92 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/9ef913fae226c6c9e5f5118cb8299e0362f5d447174e6da0d58120967b130043.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adab9404415dde7f33d6951c5c27e7a739788cdc8102ee6fd34f0d971cb9ea34 +size 13813 diff --git a/data/2025/2504_10xxx/2504.10478/images/a3080dc079b0901c51c02399a1907b2e2c96b0a922a54f5a446ed2ca4860e645.jpg b/data/2025/2504_10xxx/2504.10478/images/a3080dc079b0901c51c02399a1907b2e2c96b0a922a54f5a446ed2ca4860e645.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b31d5ed6e597d4e6b8213503363684e926268c2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/a3080dc079b0901c51c02399a1907b2e2c96b0a922a54f5a446ed2ca4860e645.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df284bc22e7f2a5ce97d59571cf0b880a263f153d568dfb973e8c3cbeb5c06d1 +size 4737 diff --git a/data/2025/2504_10xxx/2504.10478/images/a5aea18089ca053347f8eb61bd79c4fcdbe3f455df74855d569db5b6dfaee20b.jpg b/data/2025/2504_10xxx/2504.10478/images/a5aea18089ca053347f8eb61bd79c4fcdbe3f455df74855d569db5b6dfaee20b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d4cc066d5a33de1f509bfdad4467011aace9838 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/a5aea18089ca053347f8eb61bd79c4fcdbe3f455df74855d569db5b6dfaee20b.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:6515d037d0053206800b5893185b08c220ba8d42b517b692f69a77297bffb2cb +size 5046 diff --git a/data/2025/2504_10xxx/2504.10478/images/a63859f2d49730f3fc594d38e64daee690ae73ff3dfdfd4802371d22024209fe.jpg b/data/2025/2504_10xxx/2504.10478/images/a63859f2d49730f3fc594d38e64daee690ae73ff3dfdfd4802371d22024209fe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..22182b61c45c1aca6dbb970997e786e44c6d45ae --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/a63859f2d49730f3fc594d38e64daee690ae73ff3dfdfd4802371d22024209fe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3e771d7503b6c60b43599d1827204e9f97721f8ab0089e7c1dc987cd90289fc +size 16617 diff --git a/data/2025/2504_10xxx/2504.10478/images/a68455b7785772599da90654e44c982cadd4ddd78f610dab8af5816a70e0c43c.jpg b/data/2025/2504_10xxx/2504.10478/images/a68455b7785772599da90654e44c982cadd4ddd78f610dab8af5816a70e0c43c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ca5bf5873932822897046016a2855b0e7eb2fd8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/a68455b7785772599da90654e44c982cadd4ddd78f610dab8af5816a70e0c43c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c1cb5fcd55a50be773e3dcb7e2ae614a02b2c7d354df4b057dc061a9d0bb9b5 +size 10865 diff --git a/data/2025/2504_10xxx/2504.10478/images/a79e505ca270a42946351e05193fde44a40c51342de249abe1125406caef19ef.jpg b/data/2025/2504_10xxx/2504.10478/images/a79e505ca270a42946351e05193fde44a40c51342de249abe1125406caef19ef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7679f3e7902cd256eb12b51abfc3015657552032 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/a79e505ca270a42946351e05193fde44a40c51342de249abe1125406caef19ef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:797a3018e53df2ee068059cba834d10d047c3508892d6e12e930a07a714c317c +size 12277 diff --git 
a/data/2025/2504_10xxx/2504.10478/images/a8fef11dae9deb43d79b665e01086d9767e0568d82dec02ac4133daecf6e053a.jpg b/data/2025/2504_10xxx/2504.10478/images/a8fef11dae9deb43d79b665e01086d9767e0568d82dec02ac4133daecf6e053a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c1ce6e1e9eff98fe55985bfd1df1127cd656aa4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/a8fef11dae9deb43d79b665e01086d9767e0568d82dec02ac4133daecf6e053a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c62ce8cb9cebbe980e28f848e485ba0d311b2cc28ef4f4c9a2437a7411c05beb +size 10397 diff --git a/data/2025/2504_10xxx/2504.10478/images/aa8a4b4c00fe197e0575e11b2fecb88604a4562cbe6fb459230d83e7172326d4.jpg b/data/2025/2504_10xxx/2504.10478/images/aa8a4b4c00fe197e0575e11b2fecb88604a4562cbe6fb459230d83e7172326d4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..356839e6b3272c460b7c2d20121fa2cfb3591e17 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/aa8a4b4c00fe197e0575e11b2fecb88604a4562cbe6fb459230d83e7172326d4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:144ef0e6fe9bfc8f7c30187f24494b15c00c9c13ce168fd2565c1daedb8b8a89 +size 14034 diff --git a/data/2025/2504_10xxx/2504.10478/images/ad7003e3f0b25d46193c9fffc98906a772a124775a56cd8b98a55940824b50a2.jpg b/data/2025/2504_10xxx/2504.10478/images/ad7003e3f0b25d46193c9fffc98906a772a124775a56cd8b98a55940824b50a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d225e33d83e4cd923683fcc2eb7dbcef29e8502 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/ad7003e3f0b25d46193c9fffc98906a772a124775a56cd8b98a55940824b50a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc68e9b94f923473f02cf550cbfd6af1252d96636686bbc3ee77c8c7802ccee6 +size 13578 diff --git a/data/2025/2504_10xxx/2504.10478/images/adce33339af950e6097d8809ac7898c79d536d03de13d3b42c8e47246adb4ef3.jpg 
b/data/2025/2504_10xxx/2504.10478/images/adce33339af950e6097d8809ac7898c79d536d03de13d3b42c8e47246adb4ef3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..af45d0c8b6f73b6ba928bbb7b4a9c78d157bb9c5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/adce33339af950e6097d8809ac7898c79d536d03de13d3b42c8e47246adb4ef3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:996d1011ed3cca371242bcc34a5c38d29fb963dc4865798db1e71ffa58054760 +size 14574 diff --git a/data/2025/2504_10xxx/2504.10478/images/aeb10270cd0a472e542fdfa57315bbbb7b5ba41555291cba501c05e400478d37.jpg b/data/2025/2504_10xxx/2504.10478/images/aeb10270cd0a472e542fdfa57315bbbb7b5ba41555291cba501c05e400478d37.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc4e0eda28c745ba4ea6e41271c29bb4d4c17096 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/aeb10270cd0a472e542fdfa57315bbbb7b5ba41555291cba501c05e400478d37.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61ea1f4cc8d6fa4bf2025b212b2ba66967ce75f05b1706ed72cfaf7cf33fbba5 +size 9516 diff --git a/data/2025/2504_10xxx/2504.10478/images/b0c17f52c3d8d9f710084b2e71cd83327cb690eb5a0d9b91600ddb7e86032d53.jpg b/data/2025/2504_10xxx/2504.10478/images/b0c17f52c3d8d9f710084b2e71cd83327cb690eb5a0d9b91600ddb7e86032d53.jpg new file mode 100644 index 0000000000000000000000000000000000000000..caf8d33d817fd2302e875593f96824be39c20014 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/b0c17f52c3d8d9f710084b2e71cd83327cb690eb5a0d9b91600ddb7e86032d53.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e08177accee5b666a6e2006a128cee2bf0764cf7996ca2eead7e8875f030ed74 +size 14580 diff --git a/data/2025/2504_10xxx/2504.10478/images/b0ed055ba002df5032ea09faa74193e795a62153a65d9469f3715cc3b807f7ff.jpg b/data/2025/2504_10xxx/2504.10478/images/b0ed055ba002df5032ea09faa74193e795a62153a65d9469f3715cc3b807f7ff.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..4cb2bb5effdeb5399a03fd96b5677ab8e5e05894 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/b0ed055ba002df5032ea09faa74193e795a62153a65d9469f3715cc3b807f7ff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9092643b3db33aa00c1610cd19bc80a81975be2694bce6730ba3e82386d7d77c +size 8704 diff --git a/data/2025/2504_10xxx/2504.10478/images/b1061168966769f0a706a6cdefd0ca7df762aaf6f5cf2e53465bfdddcf6d261d.jpg b/data/2025/2504_10xxx/2504.10478/images/b1061168966769f0a706a6cdefd0ca7df762aaf6f5cf2e53465bfdddcf6d261d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1a29a6cb2362c91584a2a4cf67756e7ec570634a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/b1061168966769f0a706a6cdefd0ca7df762aaf6f5cf2e53465bfdddcf6d261d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:966f3f45434fc74b0fef19fcb490d2ceb08b105ac0d3ade87d496274daf96bf2 +size 13705 diff --git a/data/2025/2504_10xxx/2504.10478/images/b1e638e906973e22c65e08bce79f7b716ac0fa8700587808cf2a8fb6a77abce6.jpg b/data/2025/2504_10xxx/2504.10478/images/b1e638e906973e22c65e08bce79f7b716ac0fa8700587808cf2a8fb6a77abce6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f1460ee58689aa3026c86b71f72bae8cf56acbaf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/b1e638e906973e22c65e08bce79f7b716ac0fa8700587808cf2a8fb6a77abce6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59e52cae803dd948c2c137080ab4efed7d4923bac723dd3c9f1650340026bf29 +size 9014 diff --git a/data/2025/2504_10xxx/2504.10478/images/b2d2e4fd5b348100cefd256746110d2df91d3244a48b747cbf5561ad703f3c6e.jpg b/data/2025/2504_10xxx/2504.10478/images/b2d2e4fd5b348100cefd256746110d2df91d3244a48b747cbf5561ad703f3c6e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7eed15310cdfb0ea408affc2573ac8ff2a7e237b --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10478/images/b2d2e4fd5b348100cefd256746110d2df91d3244a48b747cbf5561ad703f3c6e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a3fdffcb4262f2f799d9a0169990fcfd3d10c8a86bc03366fcaf7b038260a9f +size 10438 diff --git a/data/2025/2504_10xxx/2504.10478/images/b44675a6666bda0b73ef9e200a9b1d1022ee221c3d4530367974a9e78cee2014.jpg b/data/2025/2504_10xxx/2504.10478/images/b44675a6666bda0b73ef9e200a9b1d1022ee221c3d4530367974a9e78cee2014.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b6b2ca36080b4bc809800576652ee77717d49f8f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/b44675a6666bda0b73ef9e200a9b1d1022ee221c3d4530367974a9e78cee2014.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bf175a24c5ec07d526eb91ea9d2f4240aecddd80dd17cbe536b0e4a69d6dac3 +size 10133 diff --git a/data/2025/2504_10xxx/2504.10478/images/b592a7e7ccb7f34cab0c0fe80f8b46299a170f5e015417c6a260678ca37e45d5.jpg b/data/2025/2504_10xxx/2504.10478/images/b592a7e7ccb7f34cab0c0fe80f8b46299a170f5e015417c6a260678ca37e45d5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45523134d5339d341e4e2126a4f5ff4467137f52 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/b592a7e7ccb7f34cab0c0fe80f8b46299a170f5e015417c6a260678ca37e45d5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b909659395af457df0a37a066d7a69a24f6ca20cf28a579615d37574009d5036 +size 11235 diff --git a/data/2025/2504_10xxx/2504.10478/images/b5a0036c7749a83fbea586e2ad026ccfe84a4e95cb087bdcfd3c7171f8cbc3f2.jpg b/data/2025/2504_10xxx/2504.10478/images/b5a0036c7749a83fbea586e2ad026ccfe84a4e95cb087bdcfd3c7171f8cbc3f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b045a30169c2a232818ed0fac31fec129a8c5919 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/b5a0036c7749a83fbea586e2ad026ccfe84a4e95cb087bdcfd3c7171f8cbc3f2.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:703b967a869e64a7821b7e3efc81faf8605537127731c5f8085c3e6fcc13a116 +size 11151 diff --git a/data/2025/2504_10xxx/2504.10478/images/b5e95e0338cedd9b1f43533a5dbb16642aa6e174015a5f1dd91e1e8a66e77aa3.jpg b/data/2025/2504_10xxx/2504.10478/images/b5e95e0338cedd9b1f43533a5dbb16642aa6e174015a5f1dd91e1e8a66e77aa3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..182d60e83bb48c6c670bb26c1439d0e562258d47 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/b5e95e0338cedd9b1f43533a5dbb16642aa6e174015a5f1dd91e1e8a66e77aa3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c08475b13817b0df77984540739dca45a3d14a722eb78431615838a7c9966a1e +size 45929 diff --git a/data/2025/2504_10xxx/2504.10478/images/b920292dcecb0dc0a1bc2d82f9a4af452fedf7363369eff449048c35d2f2d1b2.jpg b/data/2025/2504_10xxx/2504.10478/images/b920292dcecb0dc0a1bc2d82f9a4af452fedf7363369eff449048c35d2f2d1b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c86cf6ca08a96211888108a84001a59edad43d30 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/b920292dcecb0dc0a1bc2d82f9a4af452fedf7363369eff449048c35d2f2d1b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b5d70cfea0a8d1401c9a20834240fa89c7a51dc6e366ab2c2bef2025b8840ce +size 17604 diff --git a/data/2025/2504_10xxx/2504.10478/images/b944cf1cc45211aee3e8d25a15fec6ac4fa6994c7df46fbac79b006f97f67b7e.jpg b/data/2025/2504_10xxx/2504.10478/images/b944cf1cc45211aee3e8d25a15fec6ac4fa6994c7df46fbac79b006f97f67b7e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9616762629f0e89b1347c51bd66646c042caae90 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/b944cf1cc45211aee3e8d25a15fec6ac4fa6994c7df46fbac79b006f97f67b7e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bddcb009de4d8a0f290eab9011862a126cf9b82b87a469f90a352844b6c81600 +size 2406 diff --git 
a/data/2025/2504_10xxx/2504.10478/images/bb1d37404a819e2ad64f45cefe623549253879023f3bf030ffe2894bde9a30af.jpg b/data/2025/2504_10xxx/2504.10478/images/bb1d37404a819e2ad64f45cefe623549253879023f3bf030ffe2894bde9a30af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b81b23ea0c78f2f2390da406c6fa5d4a50675c7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/bb1d37404a819e2ad64f45cefe623549253879023f3bf030ffe2894bde9a30af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9ef5ad3347a2f291bd4c502995d4109fa598f0b707d21b4f8263fcf775e9707 +size 33830 diff --git a/data/2025/2504_10xxx/2504.10478/images/bd859f21719d1d07b48f5e32d3cf6033dc8039de333fc86dee2565d05cfa3961.jpg b/data/2025/2504_10xxx/2504.10478/images/bd859f21719d1d07b48f5e32d3cf6033dc8039de333fc86dee2565d05cfa3961.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0377ffc4c0c775ee43e652ca5885526422bb1ac4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/bd859f21719d1d07b48f5e32d3cf6033dc8039de333fc86dee2565d05cfa3961.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12043f576d15ab69d555f6c3f7a902c07a1c2804cbc0036704160819ce3ce8ae +size 9660 diff --git a/data/2025/2504_10xxx/2504.10478/images/be9b911be9a878ffd4e4573a062e54694ee59dafba8f9f08c927f7135b54b9b9.jpg b/data/2025/2504_10xxx/2504.10478/images/be9b911be9a878ffd4e4573a062e54694ee59dafba8f9f08c927f7135b54b9b9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9b10528e77c6121055071149ccd6246bf00453ce --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/be9b911be9a878ffd4e4573a062e54694ee59dafba8f9f08c927f7135b54b9b9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13403e955bf5a94db2d7d4e93d334bf599d3743c56edf615657b13c47ed4a03c +size 24393 diff --git a/data/2025/2504_10xxx/2504.10478/images/c02a1a4e88be241a1b94435af768afb77cb72042c3e9af76fe5be4ac8f1b8eeb.jpg 
b/data/2025/2504_10xxx/2504.10478/images/c02a1a4e88be241a1b94435af768afb77cb72042c3e9af76fe5be4ac8f1b8eeb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6599c150e37aaa9e12e25b7c792381af45622b12 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/c02a1a4e88be241a1b94435af768afb77cb72042c3e9af76fe5be4ac8f1b8eeb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:febb601fadc70d9a9f1b4321cd56baeeaf39a68a80b7a31e7e43b9e7f13e6c3b +size 8530 diff --git a/data/2025/2504_10xxx/2504.10478/images/c03ad321d119e163d7d652bd75fdd1be669582b2e192edd3f0c824e236fa1553.jpg b/data/2025/2504_10xxx/2504.10478/images/c03ad321d119e163d7d652bd75fdd1be669582b2e192edd3f0c824e236fa1553.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f25deef5246c311448e8a88307e69a8c315dd407 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/c03ad321d119e163d7d652bd75fdd1be669582b2e192edd3f0c824e236fa1553.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0b362875c8bf931f94465fa8645b225b2a6c1900938ac56d011402bfacbfc20 +size 15029 diff --git a/data/2025/2504_10xxx/2504.10478/images/c09374f5d22f90dca1bd1db2225d9246275d5d6c3a44de40384a8f4f0172de11.jpg b/data/2025/2504_10xxx/2504.10478/images/c09374f5d22f90dca1bd1db2225d9246275d5d6c3a44de40384a8f4f0172de11.jpg new file mode 100644 index 0000000000000000000000000000000000000000..348fd4ef1cc7cdcdd494944a0aa31d66674bb319 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/c09374f5d22f90dca1bd1db2225d9246275d5d6c3a44de40384a8f4f0172de11.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8bd184d592452a1ce190edf6ebcab67d1d0d63e00c5046a30ce6799853c2238 +size 13185 diff --git a/data/2025/2504_10xxx/2504.10478/images/c20602e164d649b2cede3ffcbe6d6d45d34e0fbfdb8701a97c0a3495fe22a13a.jpg b/data/2025/2504_10xxx/2504.10478/images/c20602e164d649b2cede3ffcbe6d6d45d34e0fbfdb8701a97c0a3495fe22a13a.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..434552b7849e46a69cf994c309f19e517484343c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/c20602e164d649b2cede3ffcbe6d6d45d34e0fbfdb8701a97c0a3495fe22a13a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2a6a5a3705b52a360706a8a33a388a82e3b3d3f6d009d735917d907fc08df9a +size 13623 diff --git a/data/2025/2504_10xxx/2504.10478/images/c5f007038e4a8539395358cfc078786e75a2500ccebd39ea8dcc0d9f3edd38c9.jpg b/data/2025/2504_10xxx/2504.10478/images/c5f007038e4a8539395358cfc078786e75a2500ccebd39ea8dcc0d9f3edd38c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8d5c824619bae3be51f941f1cd571d072efa6e1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/c5f007038e4a8539395358cfc078786e75a2500ccebd39ea8dcc0d9f3edd38c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f5ba98b63b04ea235573aee68b5c917ae776b0fdd8fecfe015ac85935ddeaae +size 14747 diff --git a/data/2025/2504_10xxx/2504.10478/images/c6ea5a13b67d1c489b5482fd1dc7e9c590db2bda35acfbb90c820e3cee9fbbba.jpg b/data/2025/2504_10xxx/2504.10478/images/c6ea5a13b67d1c489b5482fd1dc7e9c590db2bda35acfbb90c820e3cee9fbbba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b0bed57e47f8efcb0e049d224a8af2ce2d9f5356 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/c6ea5a13b67d1c489b5482fd1dc7e9c590db2bda35acfbb90c820e3cee9fbbba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4f6c573349f6906a5f16ffed460cc08aefcadaaf50d5bc236ab2159c548ff73 +size 75735 diff --git a/data/2025/2504_10xxx/2504.10478/images/c826bd5a0ec5354340e0780dded03d7ef2d94af5b3717c2da25e8b37e585943e.jpg b/data/2025/2504_10xxx/2504.10478/images/c826bd5a0ec5354340e0780dded03d7ef2d94af5b3717c2da25e8b37e585943e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4da1b516f342ca19b618599f88b1be40776a2776 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10478/images/c826bd5a0ec5354340e0780dded03d7ef2d94af5b3717c2da25e8b37e585943e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2a78ea7dfc43b691bb3d0f4b2ccf3499812e18aad911f8c0bf9d340baab15d8 +size 30670 diff --git a/data/2025/2504_10xxx/2504.10478/images/c8372a03f4c6999a7525a65022b672de503002d5509ddfff70a3d911541e6379.jpg b/data/2025/2504_10xxx/2504.10478/images/c8372a03f4c6999a7525a65022b672de503002d5509ddfff70a3d911541e6379.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2cd80e51154be346e4305e0fece2d9ac5774d63d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/c8372a03f4c6999a7525a65022b672de503002d5509ddfff70a3d911541e6379.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5790eb7a42c337946e031c7649da2b31e1999d32100f1f5cf607fa213a6df0e1 +size 4342 diff --git a/data/2025/2504_10xxx/2504.10478/images/ca1b539d95c5fb460f2be006fa7134f3f1ba977fdaf654bf1420e28cac93d5b2.jpg b/data/2025/2504_10xxx/2504.10478/images/ca1b539d95c5fb460f2be006fa7134f3f1ba977fdaf654bf1420e28cac93d5b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..721bfd18b7c8884ebd4e824482c62a6ee862b66f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/ca1b539d95c5fb460f2be006fa7134f3f1ba977fdaf654bf1420e28cac93d5b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2871ff24cd7cd0cbf26235fc014f781b5e80caa00345f02520eb7336da3b173 +size 838 diff --git a/data/2025/2504_10xxx/2504.10478/images/cafeaf2d7bb4404026a1ba3699040624b79be50df113c5458aaa97d41afc6c76.jpg b/data/2025/2504_10xxx/2504.10478/images/cafeaf2d7bb4404026a1ba3699040624b79be50df113c5458aaa97d41afc6c76.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eaec1c6fe006ef60d730bd2f7b10eacabb4ab4b9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/cafeaf2d7bb4404026a1ba3699040624b79be50df113c5458aaa97d41afc6c76.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:3ba269ab8c5f867579e52582ea50d3e16a6dc2f93fa43704e7a7c7e9b8d0c1b7 +size 9842 diff --git a/data/2025/2504_10xxx/2504.10478/images/cb3ca09225ec7f2708663a63adf1bb7d1807c672a937ff5625e5894c7f467191.jpg b/data/2025/2504_10xxx/2504.10478/images/cb3ca09225ec7f2708663a63adf1bb7d1807c672a937ff5625e5894c7f467191.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a71f1ebd03d271dfb8158a5d8b21700458cf4c65 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/cb3ca09225ec7f2708663a63adf1bb7d1807c672a937ff5625e5894c7f467191.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d07be4958b551b537929be8f76ce08f9f70376ec7b93a1302a9b834f20675ed1 +size 12152 diff --git a/data/2025/2504_10xxx/2504.10478/images/cc05d29f10029f117c9149098f2eea1f4f41e939ebff5641009f894692dd280d.jpg b/data/2025/2504_10xxx/2504.10478/images/cc05d29f10029f117c9149098f2eea1f4f41e939ebff5641009f894692dd280d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7258238381ac1e8838109b080517595fabc671ff --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/cc05d29f10029f117c9149098f2eea1f4f41e939ebff5641009f894692dd280d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92573115de6d28bdd543519f85b630dd802945ffedc36ffd898e5bd2204d362b +size 10192 diff --git a/data/2025/2504_10xxx/2504.10478/images/ccfab5cfd7bae8ef782e079cda3aeda4adbe6f786122fc77799b75ec40133ee2.jpg b/data/2025/2504_10xxx/2504.10478/images/ccfab5cfd7bae8ef782e079cda3aeda4adbe6f786122fc77799b75ec40133ee2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a28fc0000c06afc01db39fbe9aa54a86db4b974f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/ccfab5cfd7bae8ef782e079cda3aeda4adbe6f786122fc77799b75ec40133ee2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac296b730a105c2b7c47ed711f60172761645d295cbdad7c02e872f3525a61a7 +size 48688 diff --git 
a/data/2025/2504_10xxx/2504.10478/images/cd383a929ba7e9a9865537886d40e7be10298f21f2a4eee59bb64f1c03895e08.jpg b/data/2025/2504_10xxx/2504.10478/images/cd383a929ba7e9a9865537886d40e7be10298f21f2a4eee59bb64f1c03895e08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5718bb0f2ab180260acd395588e8f29237e041de --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/cd383a929ba7e9a9865537886d40e7be10298f21f2a4eee59bb64f1c03895e08.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f71615a527b87e3a272b9aa371881f0471d920967b806364231c1692a1586a8 +size 11506 diff --git a/data/2025/2504_10xxx/2504.10478/images/ce0b365e2d0cfdbc6033585c43605d8fc9a10bbe002ac2b4022da555660ffbd8.jpg b/data/2025/2504_10xxx/2504.10478/images/ce0b365e2d0cfdbc6033585c43605d8fc9a10bbe002ac2b4022da555660ffbd8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..49bb6fa953c6c80c1fd0ba61b7e0e0865b994519 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/ce0b365e2d0cfdbc6033585c43605d8fc9a10bbe002ac2b4022da555660ffbd8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2bc880499037d90f1c7b60fa36a036de34384e065609458970750a40debdbff +size 10675 diff --git a/data/2025/2504_10xxx/2504.10478/images/cee9d9f66f1ba05e20958f6280cd378d5414554e40cbe63513148f4d200be612.jpg b/data/2025/2504_10xxx/2504.10478/images/cee9d9f66f1ba05e20958f6280cd378d5414554e40cbe63513148f4d200be612.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ed605c941faf7d854dee2e23a4b8b21d38e4384 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/cee9d9f66f1ba05e20958f6280cd378d5414554e40cbe63513148f4d200be612.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:503f9af943a284d3266eee339f61d75bc5abd9c4cad6a24a9a25c8fbed6deb7b +size 14705 diff --git a/data/2025/2504_10xxx/2504.10478/images/d05652d4c9f3cc0229b825169a87f0a25576d63a99afa799f8969368cab3b996.jpg 
b/data/2025/2504_10xxx/2504.10478/images/d05652d4c9f3cc0229b825169a87f0a25576d63a99afa799f8969368cab3b996.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c4ccad9f54a4d13965e9f71751efcccc53f2af1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/d05652d4c9f3cc0229b825169a87f0a25576d63a99afa799f8969368cab3b996.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e295d5afe9b55cf0b454910559afb020e056ddeea66bf65c863b63839aad287 +size 11451 diff --git a/data/2025/2504_10xxx/2504.10478/images/d0b7ba9d3fe9a3b515301116f9548cf159ed54d41ee527845462fc6abdd2b25e.jpg b/data/2025/2504_10xxx/2504.10478/images/d0b7ba9d3fe9a3b515301116f9548cf159ed54d41ee527845462fc6abdd2b25e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c6e871a47f9cb0fa17d7401ff7c2fd8f6649fdda --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/d0b7ba9d3fe9a3b515301116f9548cf159ed54d41ee527845462fc6abdd2b25e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:143cc594505339b524cb2eb64e10ebc426453648346a65a2639d159e2a6c74a0 +size 7769 diff --git a/data/2025/2504_10xxx/2504.10478/images/d3bd9e86127b96e78ed2a923eb025ecabea92613c76f789c22579bde4d166df7.jpg b/data/2025/2504_10xxx/2504.10478/images/d3bd9e86127b96e78ed2a923eb025ecabea92613c76f789c22579bde4d166df7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc77de793adf1f6b888c6cb9ec7147d0e257caa7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/d3bd9e86127b96e78ed2a923eb025ecabea92613c76f789c22579bde4d166df7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c1e4472447d589afadeace48e90a6a85cbb77ca1e4d3bb8a7884ec43772c31f +size 17008 diff --git a/data/2025/2504_10xxx/2504.10478/images/d44e7811d73135bd84f5bafc86582a0b9eb0cab2b18727f87186e109f0181cc6.jpg b/data/2025/2504_10xxx/2504.10478/images/d44e7811d73135bd84f5bafc86582a0b9eb0cab2b18727f87186e109f0181cc6.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..1ff8b3c32cc7e51fee9db0a3566c79d495b1a5bb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/d44e7811d73135bd84f5bafc86582a0b9eb0cab2b18727f87186e109f0181cc6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f646eaaf3914ad338714926873288d746cb150b3e41052b6d7f6847873460836 +size 5760 diff --git a/data/2025/2504_10xxx/2504.10478/images/d6563642ee50cd803af3a0e79797353c9f1d469062b359c5cc621ad0702e4063.jpg b/data/2025/2504_10xxx/2504.10478/images/d6563642ee50cd803af3a0e79797353c9f1d469062b359c5cc621ad0702e4063.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e539878c766d50f81152c6fe81c6b35a1d1cc5eb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/d6563642ee50cd803af3a0e79797353c9f1d469062b359c5cc621ad0702e4063.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:698c81a396cfa2c9302d125d42e1781ed6b1694b1c2986ca203e59c0ce96f87b +size 832 diff --git a/data/2025/2504_10xxx/2504.10478/images/d837c799a3424c44d929865ee8c5c17f2da827b7e87b44e7380e31803ce3e9b3.jpg b/data/2025/2504_10xxx/2504.10478/images/d837c799a3424c44d929865ee8c5c17f2da827b7e87b44e7380e31803ce3e9b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..861e39fbab971fd305d76a6f28e73b1d565f1a8b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/d837c799a3424c44d929865ee8c5c17f2da827b7e87b44e7380e31803ce3e9b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b38cae0b7a75521edc832e9315f5b365b911734685dd5e4679be14eb33697038 +size 5292 diff --git a/data/2025/2504_10xxx/2504.10478/images/d98f4a0d11c754910a2b645db67fdc99936a484cb60dcd83a0344a2437fc161b.jpg b/data/2025/2504_10xxx/2504.10478/images/d98f4a0d11c754910a2b645db67fdc99936a484cb60dcd83a0344a2437fc161b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b1fd8f93457fe877e80e345524a518e5dc4d192 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10478/images/d98f4a0d11c754910a2b645db67fdc99936a484cb60dcd83a0344a2437fc161b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06fcc438c485a89b3fe9fa7a1c01f7d3b77033f9e23635905331fecea65ff8a7 +size 13768 diff --git a/data/2025/2504_10xxx/2504.10478/images/db21166a21e933cccd81e5480a94c5617a43c7911409efb16d9d732815acbe3d.jpg b/data/2025/2504_10xxx/2504.10478/images/db21166a21e933cccd81e5480a94c5617a43c7911409efb16d9d732815acbe3d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..509d63cbc51f3cea9e88e929302c87169a8da3bd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/db21166a21e933cccd81e5480a94c5617a43c7911409efb16d9d732815acbe3d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59e8d861a7dbe497eec0194102a00dae8ac898e335e7979f94ee5389b91ae129 +size 8816 diff --git a/data/2025/2504_10xxx/2504.10478/images/db799a546fac2266a4fb6e884a368cf79f696cc523c048ac9f7712be215438ae.jpg b/data/2025/2504_10xxx/2504.10478/images/db799a546fac2266a4fb6e884a368cf79f696cc523c048ac9f7712be215438ae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..67886b2a5c0147a9797777db68e7cf979e81092c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/db799a546fac2266a4fb6e884a368cf79f696cc523c048ac9f7712be215438ae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a701902b903630609c15f9fd02168da985d165c99bc06e6558aec8a6e8604f7 +size 17693 diff --git a/data/2025/2504_10xxx/2504.10478/images/dc80c75a67d472177dcc63f0074f37ec267e332b7e7317accb544868294cd22e.jpg b/data/2025/2504_10xxx/2504.10478/images/dc80c75a67d472177dcc63f0074f37ec267e332b7e7317accb544868294cd22e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb10731210bb0239b4707065054becee13718106 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/dc80c75a67d472177dcc63f0074f37ec267e332b7e7317accb544868294cd22e.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:05d26af3838efbc7314881d3683237b6ca8403d33f2ea8ae1198772aa12cccbb +size 13548 diff --git a/data/2025/2504_10xxx/2504.10478/images/dcc8297332fec4d4f444504910cf3c945a0afaf6ec9a11be41dfb59a185c4df3.jpg b/data/2025/2504_10xxx/2504.10478/images/dcc8297332fec4d4f444504910cf3c945a0afaf6ec9a11be41dfb59a185c4df3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..faac39bd11a20642487a7a7b9789cc061deaa377 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/dcc8297332fec4d4f444504910cf3c945a0afaf6ec9a11be41dfb59a185c4df3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a475858feb90d792cab00d567356b330e95cdad7bb699db7c9aa46e13bf0b2a5 +size 17164 diff --git a/data/2025/2504_10xxx/2504.10478/images/e0ef05e879f2cabc6cc375cd5dfc6c14399c3e8c637b35a2bdd2dcd0382c64de.jpg b/data/2025/2504_10xxx/2504.10478/images/e0ef05e879f2cabc6cc375cd5dfc6c14399c3e8c637b35a2bdd2dcd0382c64de.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7aa8d235a454cf7a29e76794e1982e87937bb983 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/e0ef05e879f2cabc6cc375cd5dfc6c14399c3e8c637b35a2bdd2dcd0382c64de.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb56cfdaac34bb5035a50ee43cefe7f171a7b596db00d29c70d86aa37f0a9a1a +size 11486 diff --git a/data/2025/2504_10xxx/2504.10478/images/e15bf9634190a320923c26e9e93036be5f32c027f6a9256280b04a259db1412f.jpg b/data/2025/2504_10xxx/2504.10478/images/e15bf9634190a320923c26e9e93036be5f32c027f6a9256280b04a259db1412f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..de32d2bd52bf00490fa4d0c599344cc793239194 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/e15bf9634190a320923c26e9e93036be5f32c027f6a9256280b04a259db1412f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb4d937f22727f5fa9d3474f80063721a70f7d7f18ac1a0067443dd5660a671f +size 10645 diff --git 
a/data/2025/2504_10xxx/2504.10478/images/e2a337f3f3b316e21bd15dad207ede3c32034de3208f9ee58d3e8e8a316f2a94.jpg b/data/2025/2504_10xxx/2504.10478/images/e2a337f3f3b316e21bd15dad207ede3c32034de3208f9ee58d3e8e8a316f2a94.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b737780115bb20ac53179650190324e855fb4c8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/e2a337f3f3b316e21bd15dad207ede3c32034de3208f9ee58d3e8e8a316f2a94.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf121d24bbf96fcea55f7f3048ee3705b37d9d5dadd1972a3a6553ee13092996 +size 15203 diff --git a/data/2025/2504_10xxx/2504.10478/images/e2ab7f724e76325f770f6dd199c9afc3e5801a548582d0e9ac5362497292c00e.jpg b/data/2025/2504_10xxx/2504.10478/images/e2ab7f724e76325f770f6dd199c9afc3e5801a548582d0e9ac5362497292c00e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..36f557d3243d691b4ddb183f0a80bb2f0563f22b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/e2ab7f724e76325f770f6dd199c9afc3e5801a548582d0e9ac5362497292c00e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:659ec136aff1fe26fc78fb71de7c0e818caff861dc735f08db0217cd713f0cff +size 15942 diff --git a/data/2025/2504_10xxx/2504.10478/images/e3b0978da07922462da538218a0295754fb4a5a2ab33dcf01d466532d3e49fa5.jpg b/data/2025/2504_10xxx/2504.10478/images/e3b0978da07922462da538218a0295754fb4a5a2ab33dcf01d466532d3e49fa5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c287b2d6336a337e154e5538adc6d9285bd991ab --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/e3b0978da07922462da538218a0295754fb4a5a2ab33dcf01d466532d3e49fa5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca7bdc133b037bc95be50fa45f4027e9dce9fc802b46599c8c7e92e83028b5c1 +size 25543 diff --git a/data/2025/2504_10xxx/2504.10478/images/e5ccfdef5708eae2cdd50fb2c0053f33997475088945e324b1799671240c70ec.jpg 
b/data/2025/2504_10xxx/2504.10478/images/e5ccfdef5708eae2cdd50fb2c0053f33997475088945e324b1799671240c70ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f466a00766156415cefc91d0d6e0f6bbed5a9abb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/e5ccfdef5708eae2cdd50fb2c0053f33997475088945e324b1799671240c70ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d76b6590d7443a2241686531510373781d6f109d005c72af2b97d925e62ec37 +size 14016 diff --git a/data/2025/2504_10xxx/2504.10478/images/e6835df7d8541ad8ff95d0e959e32537072541d9347a870c518954a0ff19d3c8.jpg b/data/2025/2504_10xxx/2504.10478/images/e6835df7d8541ad8ff95d0e959e32537072541d9347a870c518954a0ff19d3c8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ebffd47d415f9f03dd3d77da5af35b64c61f91d3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/e6835df7d8541ad8ff95d0e959e32537072541d9347a870c518954a0ff19d3c8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:362a4e8deabcb649fb2fd8798a268b827b6d62795dd8e6749a01875c15ef686f +size 4136 diff --git a/data/2025/2504_10xxx/2504.10478/images/e6f83b4a99c6422bf8387b129df86c65e1957eac012f355da11bc5a79b5e0f61.jpg b/data/2025/2504_10xxx/2504.10478/images/e6f83b4a99c6422bf8387b129df86c65e1957eac012f355da11bc5a79b5e0f61.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f216e9cb2fa6d8036b3c1ac78e2d3abf0c4adb0a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/e6f83b4a99c6422bf8387b129df86c65e1957eac012f355da11bc5a79b5e0f61.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fde6cad97c5c26a1918d1f5959e82eebe569bb3b29e090b99934351a65392544 +size 12467 diff --git a/data/2025/2504_10xxx/2504.10478/images/e8032220c0101b093c7abbf09b817da62b4a68f6faf283ab9fe582fdf70c1c5e.jpg b/data/2025/2504_10xxx/2504.10478/images/e8032220c0101b093c7abbf09b817da62b4a68f6faf283ab9fe582fdf70c1c5e.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..54021992bb95dd9753320cce93618e469170e253 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/e8032220c0101b093c7abbf09b817da62b4a68f6faf283ab9fe582fdf70c1c5e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:863ac36801196737fbfdff9106d6e07f6adf736a350ddb96a8f58ff74c5801f6 +size 36055 diff --git a/data/2025/2504_10xxx/2504.10478/images/eb4eea5cbee71f18b652ea445b4eb335b63d414b6e7e2030b948332fc745a373.jpg b/data/2025/2504_10xxx/2504.10478/images/eb4eea5cbee71f18b652ea445b4eb335b63d414b6e7e2030b948332fc745a373.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bbe511c72946709f163c11cad86d3f61e07472e0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/eb4eea5cbee71f18b652ea445b4eb335b63d414b6e7e2030b948332fc745a373.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14bc313fffb083155ba33772865f3977038292491ced137433444c1e40093279 +size 8290 diff --git a/data/2025/2504_10xxx/2504.10478/images/ec5aabdb8afc02d520db433b2ed880cfb460293ab4f514cf1cd416a0e05b8f01.jpg b/data/2025/2504_10xxx/2504.10478/images/ec5aabdb8afc02d520db433b2ed880cfb460293ab4f514cf1cd416a0e05b8f01.jpg new file mode 100644 index 0000000000000000000000000000000000000000..be60020577aa99a8ade7356ea5b3a1a492d59ec6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/ec5aabdb8afc02d520db433b2ed880cfb460293ab4f514cf1cd416a0e05b8f01.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1589d48a4016ea2ea5b8e3b7c65ed6d520e226eea96ca61497a7b1860d4e2413 +size 5219 diff --git a/data/2025/2504_10xxx/2504.10478/images/f3af3a04795cd7ec6e134e72ceb0fd20ab57c4f2f3d29acda347037690cecaa8.jpg b/data/2025/2504_10xxx/2504.10478/images/f3af3a04795cd7ec6e134e72ceb0fd20ab57c4f2f3d29acda347037690cecaa8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f6fcfb396b69d4d69f6dc16337f3e914c6cdf50 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10478/images/f3af3a04795cd7ec6e134e72ceb0fd20ab57c4f2f3d29acda347037690cecaa8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed54c02d137658ca99ad0e9fa94fcbd18292821963080171e1f6e524cee9e17f +size 16944 diff --git a/data/2025/2504_10xxx/2504.10478/images/fbd260da70ae9fabe464358e99f7e77b4fd9b2472d84f1bf4de3ae9e3d068a3a.jpg b/data/2025/2504_10xxx/2504.10478/images/fbd260da70ae9fabe464358e99f7e77b4fd9b2472d84f1bf4de3ae9e3d068a3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c3dbb53cfc5b65e2099bcba6e0e90a868803199d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/fbd260da70ae9fabe464358e99f7e77b4fd9b2472d84f1bf4de3ae9e3d068a3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b62984a7cf9a2c9b9e182a0f87f79359d3f5e93f6272f5406cf97b0476c04951 +size 4628 diff --git a/data/2025/2504_10xxx/2504.10478/images/fc1e852587bf0d908433faf87683b609d90596c02a702dd55504a960a15c609c.jpg b/data/2025/2504_10xxx/2504.10478/images/fc1e852587bf0d908433faf87683b609d90596c02a702dd55504a960a15c609c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47c86fd8896b508b5cee26eea0fd0025eda2dc99 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/fc1e852587bf0d908433faf87683b609d90596c02a702dd55504a960a15c609c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6ae8f2edcc68c35c93556f470de9cdcbdeb5d8c554354b160d78261663e920f +size 15295 diff --git a/data/2025/2504_10xxx/2504.10478/images/ff377ce696e12e8f4f5eeea526e1b95b06bbe0822fbee618a5e9e16fb377f70c.jpg b/data/2025/2504_10xxx/2504.10478/images/ff377ce696e12e8f4f5eeea526e1b95b06bbe0822fbee618a5e9e16fb377f70c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b91120394f95dbb6007cc16bee13441e5022feb9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/ff377ce696e12e8f4f5eeea526e1b95b06bbe0822fbee618a5e9e16fb377f70c.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:665c3b7c72ae3f486cd176f66686f67815fc8678e3ec2ba298af4e08dd8f8940 +size 6966 diff --git a/data/2025/2504_10xxx/2504.10478/images/ff593e0e957fad7b1f7dc96059bb205874bd6f33868700925b2f9d68f785835d.jpg b/data/2025/2504_10xxx/2504.10478/images/ff593e0e957fad7b1f7dc96059bb205874bd6f33868700925b2f9d68f785835d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0068989716a61caa5d32d12f69661c37b02e68f3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/images/ff593e0e957fad7b1f7dc96059bb205874bd6f33868700925b2f9d68f785835d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ad292627a805bda1cda2f28e0cde930132bacd73b028a72aeddb4c13ddaaea7 +size 11767 diff --git a/data/2025/2504_10xxx/2504.10478/layout.json b/data/2025/2504_10xxx/2504.10478/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..54005c9ef3330991e573387e1dc972a87933239b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10478/layout.json @@ -0,0 +1,24945 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 68, + 76, + 402, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 76, + 402, + 119 + ], + "spans": [ + { + "bbox": [ + 68, + 76, + 402, + 119 + ], + "type": "text", + "content": "Weight Ensembling Improves Reasoning in Language Models" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 124, + 514, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 124, + 514, + 138 + ], + "spans": [ + { + "bbox": [ + 68, + 124, + 514, + 138 + ], + "type": "text", + "content": "Xingyu Dang\\*,1 Christina Baek\\*,2 Kaiyue Wen3 Zico Kolter2 Aditi Raghunathan2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 141, + 423, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 141, + 423, + 156 + ], + "spans": [ + { + "bbox": [ + 69, + 141, + 423, + 156 + ], + "type": "inline_equation", + "content": "^{1}" + 
}, + { + "bbox": [ + 69, + 141, + 423, + 156 + ], + "type": "text", + "content": " Tsinghua University " + }, + { + "bbox": [ + 69, + 141, + 423, + 156 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 69, + 141, + 423, + 156 + ], + "type": "text", + "content": " Carnegie Mellon University " + }, + { + "bbox": [ + 69, + 141, + 423, + 156 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 69, + 141, + 423, + 156 + ], + "type": "text", + "content": " Stanford University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 165, + 346, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 165, + 346, + 177 + ], + "spans": [ + { + "bbox": [ + 68, + 165, + 346, + 177 + ], + "type": "inline_equation", + "content": "\\text{品}" + }, + { + "bbox": [ + 68, + 165, + 346, + 177 + ], + "type": "text", + "content": " dangxy20@mails.tsinghua.edu.cn,kbaek@andrew.cmu.edu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 280, + 206, + 331, + 220 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 206, + 331, + 220 + ], + "spans": [ + { + "bbox": [ + 280, + 206, + 331, + 220 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 236, + 509, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 236, + 509, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 236, + 509, + 392 + ], + "type": "text", + "content": "We investigate a failure mode that arises during the training of reasoning models, where the diversity of generations begins to collapse, leading to suboptimal test-time scaling. Notably, the Pass@1 rate reliably improves during supervised finetuning (SFT), but Pass@k rapidly deteriorates. 
Surprisingly, a simple intervention of interpolating the weights of the latest SFT checkpoint with an early checkpoint, otherwise known as WiSE-FT, almost completely recovers Pass@k while also improving Pass@1. The WiSE-FT variant achieves better test-time scaling (Best@k, majority vote) and achieves superior results with less data when tuned further by reinforcement learning. Finally, we find that WiSE-FT provides complementary performance gains that cannot be achieved only through diversity-inducing decoding strategies, like temperature scaling. We formalize a bias-variance tradeoff of Pass@k with respect to the expectation and variance of Pass@1 over the test distribution. We find that WiSE-FT can reduce bias and variance simultaneously, while temperature scaling inherently trades off between bias and variance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 418, + 178, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 418, + 178, + 432 + ], + "spans": [ + { + "bbox": [ + 69, + 418, + 178, + 432 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 451, + 543, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 451, + 543, + 561 + ], + "spans": [ + { + "bbox": [ + 67, + 451, + 543, + 561 + ], + "type": "text", + "content": "Recent advances in large language models (LLMs) have showcased their remarkable ability to perform complex reasoning, yet these successes often hinge on test-time scaling strategies (Lightman et al., 2023; Snell et al., 2024; Wu et al., 2024). In many applications, such as math problems, puzzles, and logical reasoning, LLMs employ a verification framework where it is significantly easier for the model to verify a candidate solution than to generate one from scratch. 
This distinction has given rise to strategies that sample multiple \"reasoning traces\" or sequences of reasoning steps during inference, selecting the best final guess through an outcome reward model (ORM) or majority vote. In this setting, an upper bound on the performance a model could achieve is measured by Pass@K, or the probability that at least one out of " + }, + { + "bbox": [ + 67, + 451, + 543, + 561 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 67, + 451, + 543, + 561 + ], + "type": "text", + "content": " independently sampled reasoning traces is correct." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 564, + 542, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 564, + 542, + 687 + ], + "spans": [ + { + "bbox": [ + 67, + 564, + 542, + 687 + ], + "type": "text", + "content": "Unfortunately, while the standard training pipeline of supervised finetuning (SFT) followed by reinforcement learning (RL) dependably improves Pass@1 for reasoning, Pass@K tends to drop early into finetuning (Cobbe et al., 2021; Chow et al., 2024a; Chen et al., 2025). This mismatch arises from a symptom of finetuning called diversity collapse, where overtuned models yield less diverse generations. This is detrimental to Pass@K since the model wastes " + }, + { + "bbox": [ + 67, + 564, + 542, + 687 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 67, + 564, + 542, + 687 + ], + "type": "text", + "content": " attempts on only a handful of guesses. In fact, by analyzing the model's error rate i.e., 1 - Pass@1, across the test distribution, we derive a Pass@K bias-variance trade-off. To improve expected test Pass@K, one can either reduce the bias which is the expected error rate or how much the model's error rate varies across problems. The latter term is connected to diversity - more diversity allows models to hedge and do uniformly well across all test questions. 
In particular, during SFT, Pass@1 improves (bias ↓) at the cost of diversity collapse (variance ↑)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 690, + 542, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 690, + 542, + 729 + ], + "spans": [ + { + "bbox": [ + 67, + 690, + 542, + 729 + ], + "type": "text", + "content": "Surprisingly, common ways of alleviating diversity collapse, such as early stopping at peak Pass@K or decoding with high temperature, suffer from the reverse trade-off: diversity improves (variance " + }, + { + "bbox": [ + 67, + 690, + 542, + 729 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 67, + 690, + 542, + 729 + ], + "type": "text", + "content": ") at the cost of overall Pass@1 degrading (bias " + }, + { + "bbox": [ + 67, + 690, + 542, + 729 + ], + "type": "inline_equation", + "content": "\\uparrow" + }, + { + "bbox": [ + 67, + 690, + 542, + 729 + ], + "type": "text", + "content": "). 
Consequently, in this paper we are concerned with a central question:" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 266, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 266, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 266, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 14, + 225, + 36, + 567 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 225, + 36, + 567 + ], + "spans": [ + { + "bbox": [ + 14, + 225, + 36, + 567 + ], + "type": "text", + "content": "arXiv:2504.10478v4 [cs.LG] 7 Oct 2025" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 747, + 309, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 747, + 309, + 756 + ], + "spans": [ + { + "bbox": [ + 302, + 747, + 309, + 756 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 80, + 77, + 225, + 225 + ], + "blocks": [ + { + "bbox": [ + 80, + 77, + 225, + 225 + ], + "lines": [ + { + "bbox": [ + 80, + 77, + 225, + 225 + ], + "spans": [ + { + "bbox": [ + 80, + 77, + 225, + 225 + ], + "type": "image", + "image_path": "9a00ddd660bf8f8eeda9cd85892cb8a7e3465e904ab2b4e6cf073e2f5f617379.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 97, + 230, + 511, + 244 + ], + "lines": [ + { + "bbox": [ + 97, + 230, + 511, + 244 + ], + "spans": [ + { + "bbox": [ + 97, + 230, + 511, + 244 + ], + "type": "text", + "content": "--- SFT T=0.7 --- SFT T=1.0 WiSE-FT T=1.0 SFT T=1.3 SFT T=1.6" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 67, + 258, + 541, + 308 + ], + "lines": [ + { + "bbox": [ + 67, + 258, + 541, + 308 + ], + "spans": [ + 
{ + "bbox": [ + 67, + 258, + 541, + 308 + ], + "type": "text", + "content": "Figure 1: Pass@k of WiSE-FT versus SFT on GSM8k Gemma-2-2B supervised finetuned and evaluated on GSM8k. At each SFT timestep " + }, + { + "bbox": [ + 67, + 258, + 541, + 308 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 258, + 541, + 308 + ], + "type": "text", + "content": ", we evaluate Pass@k of checkpoint " + }, + { + "bbox": [ + 67, + 258, + 541, + 308 + ], + "type": "inline_equation", + "content": "w_{t}" + }, + { + "bbox": [ + 67, + 258, + 541, + 308 + ], + "type": "text", + "content": " (in dashed) with its WiSE-FT variant " + }, + { + "bbox": [ + 67, + 258, + 541, + 308 + ], + "type": "inline_equation", + "content": "1/2 \\cdot w_{t} + 1/2 \\cdot w_{0}" + }, + { + "bbox": [ + 67, + 258, + 541, + 308 + ], + "type": "text", + "content": " (in solid), where traces are independently sampled with temperature " + }, + { + "bbox": [ + 67, + 258, + 541, + 308 + ], + "type": "inline_equation", + "content": "T = [0.7, 1.0, 1.3, 1.6]" + }, + { + "bbox": [ + 67, + 258, + 541, + 308 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 228, + 78, + 378, + 226 + ], + "blocks": [ + { + "bbox": [ + 228, + 78, + 378, + 226 + ], + "lines": [ + { + "bbox": [ + 228, + 78, + 378, + 226 + ], + "spans": [ + { + "bbox": [ + 228, + 78, + 378, + 226 + ], + "type": "image", + "image_path": "3536f4b1df50cd66ddd3bfbccb80c35fc10834c00925c31728553b31b2fbfd2a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 381, + 78, + 531, + 225 + ], + "blocks": [ + { + "bbox": [ + 381, + 78, + 531, + 225 + ], + "lines": [ + { + "bbox": [ + 381, + 78, + 531, + 225 + ], + "spans": [ + { + "bbox": [ + 381, + 78, + 531, + 225 + ], + "type": "image", + "image_path": "19a01a99597df80ac8df614f4c1787a0a5b99ab4663cc34196f872471af91463.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 75, + 327, + 536, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 327, + 536, + 354 + ], + "spans": [ + { + "bbox": [ + 75, + 327, + 536, + 354 + ], + "type": "text", + "content": "Is it possible to simultaneously improve both Pass@1 and Pass@K, thereby overcoming the bias-variance tradeoff inherent in current approaches?" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 361, + 541, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 361, + 541, + 471 + ], + "spans": [ + { + "bbox": [ + 67, + 361, + 541, + 471 + ], + "type": "text", + "content": "In our work, we introduce a simple, scalable and effective intervention that allows models to achieve both high Pass@K and Pass@1 across mathematical reasoning tasks GSM8k, MATH, and AIME. 
The specific technique we use is a variant of WiSE-FT (Wortsman et al., 2022) where we interpolate the weights of the latest SFT checkpoint " + }, + { + "bbox": [ + 67, + 361, + 541, + 471 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}_t" + }, + { + "bbox": [ + 67, + 361, + 541, + 471 + ], + "type": "text", + "content": " with an early checkpoint " + }, + { + "bbox": [ + 67, + 361, + 541, + 471 + ], + "type": "inline_equation", + "content": "w_0" + }, + { + "bbox": [ + 67, + 361, + 541, + 471 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 67, + 361, + 541, + 471 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}_{\\mathrm{WiSE}(t)} = \\frac{1}{2} \\cdot \\boldsymbol{w}_0 + \\frac{1}{2} \\cdot \\boldsymbol{w}_t" + }, + { + "bbox": [ + 67, + 361, + 541, + 471 + ], + "type": "text", + "content": ". Our key finding is that WiSE-FT successfully merges the diverse sampling capabilities of earlier checkpoints while retaining or surpassing the Pass@1 of later checkpoints. In Figure 1, we observe that the WiSE-FT model achieves both higher Pass@K and Pass@1 with more SFT steps " + }, + { + "bbox": [ + 67, + 361, + 541, + 471 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 361, + 541, + 471 + ], + "type": "text", + "content": ", unlike naive SFT which suffers from an early decay in Pass@K. Moreover, the gains with WiSE-FT is unachievable by early-stopping or diversity-aware decoding alone." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 475, + 541, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 475, + 541, + 513 + ], + "spans": [ + { + "bbox": [ + 67, + 475, + 541, + 513 + ], + "type": "text", + "content": "Thus, we propose a new paradigm of training reasoning models: 1.) Train extensively using SFT as long as Pass@1 improves, 2.) Perform WiSE-FT with an earlier SFT checkpoint, 3.) Continue tuning the WiSE-FT variant using RL. 
Overall, the WiSE-FT model has the following immediate practical benefits:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 75, + 521, + 538, + 620 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 75, + 521, + 538, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 521, + 538, + 556 + ], + "spans": [ + { + "bbox": [ + 75, + 521, + 538, + 556 + ], + "type": "text", + "content": "- Better Test-Time Scaling Across all datasets and base models, the WiSE-FT variant achieves the highest performance with test-time scaling (Majority Vote, ORM) compared to an overtrained SFT model paired with diversity-aware decoding." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 75, + 561, + 538, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 561, + 538, + 620 + ], + "spans": [ + { + "bbox": [ + 75, + 561, + 538, + 620 + ], + "type": "text", + "content": "- Better Reinforcement Learning Since RL uses self-generated data to tune models, to generalize reliably, it is important for generations to provide sufficient learning signal while also having high coverage over the data space. We find that continued RL training starting from WiSE-FT weights achieves superior results with less synthetic data compared to initializing RL from the last SFT checkpoint and even early-stopped SFT." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 631, + 541, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 631, + 541, + 728 + ], + "spans": [ + { + "bbox": [ + 67, + 631, + 541, + 728 + ], + "type": "text", + "content": "In summary, we provide a comprehensive analysis of how reasoning models suffer from diversity collapse during SFT and its negative downstream impact during RL and test-time scaling. We first discuss our WiSE-FT findings in §4. Motivated by this discovery, we investigate two fundamental questions. 
First, we investigate diversity collapse during SFT and RL of reasoning models in §5. Diversity collapse not only impacts the model's ability to attempt different guesses. In fact, we make an even stronger observation - the generations of reasoning models converge towards a single reasoning trace for each test question. We theoretically prove that standard RL algorithms (i.e., REINFORCE and GRPO) fail to recover lost diversity in a simplified discrete bandit setting." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "spans": [ + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 76, + 541, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 76, + 541, + 150 + ], + "spans": [ + { + "bbox": [ + 67, + 76, + 541, + 150 + ], + "type": "text", + "content": "Second, we formalize the competing goals of Pass@1 and Pass@K as a bias-variance trade-off in §6. We empirically measure and compare the bias and variance of WiSE-FT versus early-stopping versus high temperature decoding. Notably, only WiSE-FT reduces both bias and variance. We conclude with a remark on the limitations of decoding strategies such as top-k (Shao et al., 2017), nucleus (Holtzman et al., 2020), and min-p (Nguyen et al., 2024), at eliciting the maximum capabilities with test-time scaling from current reasoning models." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 161, + 190, + 176 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 161, + 190, + 176 + ], + "spans": [ + { + "bbox": [ + 69, + 161, + 190, + 176 + ], + "type": "text", + "content": "2 Related Works" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 190, + 541, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 190, + 541, + 360 + ], + "spans": [ + { + "bbox": [ + 67, + 190, + 541, + 360 + ], + "type": "text", + "content": "Diversity collapse with SFT: The standard pipeline for enhancing reasoning in LLMs involves an initial phase of supervised fine-tuning (SFT) followed by reinforcement learning (RL) (Guo et al., 2025; Setlur et al., 2024). SFT is critical for instilling interpretable and readable reasoning chains and ensuring that the model adheres to a consistent rollout templates (Guo et al., 2025). However, a number of recent works have identified critical pitfalls of SFT that hinders the model's ability to explore and ultimately it's overall problem solving ability. Notably, Cobbe et al. (2021) observe diversity collapse when finetuning on GSM8k training dataset, during which the Pass@1 continuously improves whereas Pass@k starts to fall shortly into the training. Similar diversity collapse phenomenon also exists in the self-improvement setting with SFT (Song et al., 2024), and is theoretically investigated as the sharpening effect (Huang et al., 2024). This is not desirable as diverse sampling at inference is important for test-time scaling using majority voting (Wang et al., 2023) or reward model guided search (Setlur et al., 2024; Beeching et al., 2024). Yeo et al. (2025); Chu et al. (2025) attribute this behavior to overfitting, memorization of samples and overfixation to a template style leading to reduced generalization. 
In our work, we corroborate similar findings and propose ensembling over the course of SFT as a mitigation strategy." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 377, + 541, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 377, + 541, + 547 + ], + "spans": [ + { + "bbox": [ + 67, + 377, + 541, + 547 + ], + "type": "text", + "content": "Mitigating diversity collapse: Given the importance of diversity for effectively scaling inference-time compute, several recent works have proposed auxiliary finetuning objectives and decoding strategies to mitigate diversity collapse. Li et al. (2025) regularize the SFT process using a game-theoretic framework that encourages sparse updates, thereby preserving output diversity. Zhang et al. (2024b) directly optimizes for diversity during finetuning. Other approaches modify the finetuning procedure to directly optimize for Best-of-N sampling at inference time (Chow et al., 2024b; Sessa et al., 2024; Chen et al., 2025). Another line of work focuses on inference-time decoding, explicitly encouraging diverse solutions through modified beam search strategies (Vijayakumar et al., 2018; Olausson et al., 2024; Chen et al., 2024; Beeching et al., 2024). Li et al. (2023) improve diversity during parallel decoding by appending curated prompts to the input. In formal reasoning settings e.g., Lean, methods such as Monte Carlo tree search have been used to diversify intermediate reasoning steps, as demonstrated in AlphaProof (AlphaProof and AlphaGeometry teams, 2024). In this work, we identify a simple and complementary intervention during the finetuning process to maintain the diversity of generations. We especially care about enforcing diversity while preserving the overall accuracy of generations." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 558, + 345, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 558, + 345, + 574 + ], + "spans": [ + { + "bbox": [ + 69, + 558, + 345, + 574 + ], + "type": "text", + "content": "3 Preliminaries and Experimental Setup" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 588, + 289, + 602 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 588, + 289, + 602 + ], + "spans": [ + { + "bbox": [ + 69, + 588, + 289, + 602 + ], + "type": "text", + "content": "3.1 Pass@k, Best@k, and Majority Vote" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "spans": [ + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "text", + "content": "Given a reasoning model " + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "inline_equation", + "content": "f(\\cdot)" + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "text", + "content": ", a decoding strategy " + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "text", + "content": ", and problem " + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "text", + "content": ", the model's solution is obtained by sampling a reasoning trace " + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "inline_equation", + "content": "r := [x, s^{(1)}, s^{(2)}, \\dots, s^{(n)}, \\hat{y}]" + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "text", + "content": " consisting of a sequence of intermediate steps " + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "inline_equation", + "content": "s^{(i)}" + }, + { + "bbox": [ + 67, + 611, + 541, + 
662 + ], + "type": "text", + "content": " and a final guess " + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "inline_equation", + "content": "\\hat{y}" + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "text", + "content": ". Given " + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "text", + "content": " independently sampled traces, Pass@K measures the probability that at least one guess matches the true answer " + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 67, + 611, + 541, + 662 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 666, + 542, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 666, + 542, + 685 + ], + "spans": [ + { + "bbox": [ + 138, + 666, + 542, + 685 + ], + "type": "interline_equation", + "content": "\\operatorname {P a s s} @ \\mathrm {K} (x) = \\mathbb {E} _ {[ \\boldsymbol {r} _ {i} ] _ {i = 1} ^ {k} \\sim D (f (x))} [ \\mathbb {1} \\{\\exists i \\in [ k ] \\text {s . t .} \\hat {y} _ {i} = y \\} ] = 1 - (1 - \\rho_ {x}) ^ {K} \\tag {1}", + "image_path": "7c82543e967ea6f45c2bf933a82bf1272684715fdd543972676d409e39687a79.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 689, + 541, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 689, + 541, + 729 + ], + "spans": [ + { + "bbox": [ + 67, + 689, + 541, + 729 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 689, + 541, + 729 + ], + "type": "inline_equation", + "content": "\\rho_{x} = P(\\hat{y} = y\\mid x,f,D)" + }, + { + "bbox": [ + 67, + 689, + 541, + 729 + ], + "type": "text", + "content": " is the Pass@1 or marginal probability of sampling the ground truth answer. 
Then " + }, + { + "bbox": [ + 67, + 689, + 541, + 729 + ], + "type": "inline_equation", + "content": "(1 - \\rho_x)^K" + }, + { + "bbox": [ + 67, + 689, + 541, + 729 + ], + "type": "text", + "content": " is the probability that all " + }, + { + "bbox": [ + 67, + 689, + 541, + 729 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 67, + 689, + 541, + 729 + ], + "type": "text", + "content": " guesses are incorrect. We will refer to Pass@1 as " + }, + { + "bbox": [ + 67, + 689, + 541, + 729 + ], + "type": "inline_equation", + "content": "\\rho_{x}" + }, + { + "bbox": [ + 67, + 689, + 541, + 729 + ], + "type": "text", + "content": " interchangeably in our paper." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "spans": [ + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 76, + 541, + 101 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 76, + 541, + 101 + ], + "spans": [ + { + "bbox": [ + 67, + 76, + 541, + 101 + ], + "type": "text", + "content": "In practice, test-time compute is scaled by selecting one of " + }, + { + "bbox": [ + 67, + 76, + 541, + 101 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 67, + 76, + 541, + 101 + ], + "type": "text", + "content": " guesses either by a output reward model (ORM) or Majority Vote. 
Then we can measure Best@K as" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 101, + 110, + 508, + 146 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 110, + 508, + 146 + ], + "spans": [ + { + "bbox": [ + 101, + 110, + 508, + 146 + ], + "type": "interline_equation", + "content": "\\operatorname {B e s t} @ \\mathrm {K} (x) = \\mathbb {E} _ {[ \\boldsymbol {r} _ {i} ] _ {i = 1} ^ {k} \\sim D (f (x))} [ \\hat {y} _ {i ^ {*}} = y ] \\text {w h e r e} i ^ {*} = \\arg \\max _ {i \\in [ K ]} \\sum_ {j = 1} ^ {K} \\mathbb {1} \\left\\{\\hat {y} _ {i} = \\hat {y} _ {j} \\right\\} \\text {o r} \\operatorname {O R M} (\\boldsymbol {r} _ {i})", + "image_path": "2b5bc1934d7c465d670fdb3f31d7c83014925d80d56f23229733f5fb3a5e4176.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 152, + 541, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 152, + 541, + 190 + ], + "spans": [ + { + "bbox": [ + 67, + 152, + 541, + 190 + ], + "type": "text", + "content": "Notably, Pass@K is equivalent to Best@K using a perfect ORM verifier. As we will observe, WiSE-FT achieves both higher Pass@1 and Pass@K and this directly translates to achieving better Best@K with an ORM verifier and by Majority Vote." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 201, + 293, + 216 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 201, + 293, + 216 + ], + "spans": [ + { + "bbox": [ + 69, + 201, + 293, + 216 + ], + "type": "text", + "content": "3.2 Weight-Space Ensembling (WiSE-FT)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 225, + 541, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 225, + 541, + 285 + ], + "spans": [ + { + "bbox": [ + 67, + 225, + 541, + 285 + ], + "type": "text", + "content": "WiSE-FT is a weight-space ensembling technique proposed by Wortzman et al. 
(2022) to improve the out-of-distribution accuracy of finetuned models at no extra computational cost. In particular, while models tend to achieve better in-distribution performance after finetuning, they tend to be less robust to distribution shift. Surprisingly, by simply interpolating the weights of the finetuned model " + }, + { + "bbox": [ + 67, + 225, + 541, + 285 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}_t" + }, + { + "bbox": [ + 67, + 225, + 541, + 285 + ], + "type": "text", + "content": " with the pretrained weights " + }, + { + "bbox": [ + 67, + 225, + 541, + 285 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}_0" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 229, + 293, + 541, + 309 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 293, + 541, + 309 + ], + "spans": [ + { + "bbox": [ + 229, + 293, + 541, + 309 + ], + "type": "interline_equation", + "content": "\\boldsymbol {w} _ {\\mathrm {W i S E} (t)} = \\delta \\cdot \\boldsymbol {w} _ {0} + (1 - \\delta) \\cdot \\boldsymbol {w} _ {t} \\tag {2}", + "image_path": "02544de3a14cfeaa6aea139b5b0ab1cbd6ec4f541559f7e7213fbf4c3e2553a9.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 315, + 541, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 315, + 541, + 365 + ], + "spans": [ + { + "bbox": [ + 67, + 315, + 541, + 365 + ], + "type": "text", + "content": "WiSE-FT can achieve best of both words: the out-of-distribution accuracy of models improves without incurring a drop in in-distribution accuracy. Similar to this philosophy, we apply weight ensembling to achieve both the diverse generation ability of early SFT checkpoints while maintaining the high Pass@1 accuracy of later SFT checkpoints." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 376, + 274, + 391 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 376, + 274, + 391 + ], + "spans": [ + { + "bbox": [ + 69, + 376, + 274, + 391 + ], + "type": "text", + "content": "3.3 Training and Evaluation Pipeline" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 399, + 541, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 399, + 541, + 521 + ], + "spans": [ + { + "bbox": [ + 67, + 399, + 541, + 521 + ], + "type": "text", + "content": "The majority of our experiments are conducted on Gemma-2-2B and Qwen-2.5-0.5B. We perform SFT on a 30K subset of rephrased augmentations of GSM8k (Cobbe et al., 2021) and MATH (Hendrycks et al., 2021) in MetaMath40k (Yu et al., 2023) for 1710 steps or 10 epochs. We then continue finetuning on another 30K subset of rephrased training questions from MetaMath using Group Relative Policy Optimization (GRPO) with a binary reward of the correctness of the model's final answer. Finally, we evaluate models on GSM8K and MATH500, respectively. To estimate the true Pass@K and Pass@1 marginalized over the distribution of sampled traces, we sample 100 reasoning traces per test example and average over them to estimate Pass@1, i.e. " + }, + { + "bbox": [ + 67, + 399, + 541, + 521 + ], + "type": "inline_equation", + "content": "\\rho_{x}" + }, + { + "bbox": [ + 67, + 399, + 541, + 521 + ], + "type": "text", + "content": ". Then to calculate Pass@K, we use the theoretical formula " + }, + { + "bbox": [ + 67, + 399, + 541, + 521 + ], + "type": "inline_equation", + "content": "1 - (1 - \\rho_{x})^{K}" + }, + { + "bbox": [ + 67, + 399, + 541, + 521 + ], + "type": "text", + "content": " in Equation 1. 
Unless noted otherwise, we employ a naive decoding strategy with top-p threshold 0.9, temperature " + }, + { + "bbox": [ + 67, + 399, + 541, + 521 + ], + "type": "inline_equation", + "content": "T = 0.8" + }, + { + "bbox": [ + 67, + 399, + 541, + 521 + ], + "type": "text", + "content": ", and top-k with " + }, + { + "bbox": [ + 67, + 399, + 541, + 521 + ], + "type": "inline_equation", + "content": "K = 50" + }, + { + "bbox": [ + 67, + 399, + 541, + 521 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 534, + 449, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 534, + 449, + 552 + ], + "spans": [ + { + "bbox": [ + 67, + 534, + 449, + 552 + ], + "type": "text", + "content": "4 Improving Diverse Reasoning Capabilities by WiSE-FT" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 564, + 541, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 564, + 541, + 638 + ], + "spans": [ + { + "bbox": [ + 67, + 564, + 541, + 638 + ], + "type": "text", + "content": "We first carefully track Pass@K for " + }, + { + "bbox": [ + 67, + 564, + 541, + 638 + ], + "type": "inline_equation", + "content": "K \\in \\{1, 4, 32\\}" + }, + { + "bbox": [ + 67, + 564, + 541, + 638 + ], + "type": "text", + "content": " across the SFT trajectory of Qwen-2.5-0.5B and Gemma-2-2B. Similar to findings from Cobbe et al. (2021); Chen et al. (2025), we observe that Pass@1 continues to improve with longer SFT, whereas for larger " + }, + { + "bbox": [ + 67, + 564, + 541, + 638 + ], + "type": "inline_equation", + "content": "K = 4, 32" + }, + { + "bbox": [ + 67, + 564, + 541, + 638 + ], + "type": "text", + "content": ", Pass@K tends to peak much earlier on in training (in Figure 1, 17, and 19). In other words, while later SFT checkpoints achieve higher Pass@1, earlier SFT checkpoint achieve higher Pass@K. This tradeoff in model selection is not ideal downstream for test-time scaling." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 643, + 541, + 730 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 643, + 541, + 730 + ], + "spans": [ + { + "bbox": [ + 67, + 643, + 541, + 730 + ], + "type": "text", + "content": "Building upon this intuition, we propose weight ensembling between earlier and later SFT checkpoints. We apply a variant of WiSE-FT where instead of the pretrained model, we interpolate between the earliest SFT checkpoint (in our case, after 1 epoch of training) and the weights of later checkpoint. As shown in Figure 2, we observe a \"sweet spot\" of interpolation coefficients " + }, + { + "bbox": [ + 67, + 643, + 541, + 730 + ], + "type": "inline_equation", + "content": "\\delta \\in (0,1)" + }, + { + "bbox": [ + 67, + 643, + 541, + 730 + ], + "type": "text", + "content": " where the WiSE-FT model achieves both higher Pass@1 than the last SFT model and higher Pass@K than the early SFT model. We will fix " + }, + { + "bbox": [ + 67, + 643, + 541, + 730 + ], + "type": "inline_equation", + "content": "\\delta = 1/2" + }, + { + "bbox": [ + 67, + 643, + 541, + 730 + ], + "type": "text", + "content": ", which generally performs decently for all of the datasets we've tested. In fact, after WiSE-FT " + }, + { + "bbox": [ + 67, + 643, + 541, + 730 + ], + "type": "inline_equation", + "content": "w_{\\mathrm{WiSE}(t)}" + }, + { + "bbox": [ + 67, + 643, + 541, + 730 + ], + "type": "text", + "content": ", both Pass@1 and Pass@k grow monotonically with SFT steps " + }, + { + "bbox": [ + 67, + 643, + 541, + 730 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 643, + 541, + 730 + ], + "type": "text", + "content": " (see Figure 1)." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "spans": [ + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 77, + 223, + 223 + ], + "blocks": [ + { + "bbox": [ + 72, + 77, + 223, + 223 + ], + "lines": [ + { + "bbox": [ + 72, + 77, + 223, + 223 + ], + "spans": [ + { + "bbox": [ + 72, + 77, + 223, + 223 + ], + "type": "image", + "image_path": "546890eae7b6307835210a35ac4546692989e395b2041aa92121b17022a37557.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 235, + 541, + 297 + ], + "lines": [ + { + "bbox": [ + 67, + 235, + 541, + 297 + ], + "spans": [ + { + "bbox": [ + 67, + 235, + 541, + 297 + ], + "type": "text", + "content": "Figure 2: Pass@1 vs. Pass@K across Interpolation Coefficients We perform WiSEFT with " + }, + { + "bbox": [ + 67, + 235, + 541, + 297 + ], + "type": "inline_equation", + "content": "\\delta \\in [0.1, 0.9]" + }, + { + "bbox": [ + 67, + 235, + 541, + 297 + ], + "type": "text", + "content": " between the first and last checkpoints of model (in legend) finetuned on GSM8K, MATH, and OpenThoughts-114K, then evaluate on GSM8K, MATH500, and AIME24, respectively. Early SFT model observe higher Pass@K (y-axis) while later SFT model observes higher Pass@1 (x-axis). The interpolated model observe best of both metrics." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 231, + 78, + 380, + 224 + ], + "blocks": [ + { + "bbox": [ + 231, + 78, + 380, + 224 + ], + "lines": [ + { + "bbox": [ + 231, + 78, + 380, + 224 + ], + "spans": [ + { + "bbox": [ + 231, + 78, + 380, + 224 + ], + "type": "image", + "image_path": "6c5fa0e260a30825c41c9e0eaa75949a07f90449475e4e78fc1c1a49f05915b4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 391, + 78, + 539, + 224 + ], + "blocks": [ + { + "bbox": [ + 391, + 78, + 539, + 224 + ], + "lines": [ + { + "bbox": [ + 391, + 78, + 539, + 224 + ], + "spans": [ + { + "bbox": [ + 391, + 78, + 539, + 224 + ], + "type": "image", + "image_path": "5137cb2f59693ab411059d8208f1a96ff60109df3970efcaaad046b30b11e30c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 321, + 541, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 321, + 541, + 406 + ], + "spans": [ + { + "bbox": [ + 67, + 321, + 541, + 406 + ], + "type": "text", + "content": "Better Test-Time Scaling This boost in both Pass@1 and Pass@K directly translates to better performance with test-time scaling. We measure Best@K by Majority Vote and by selecting the reasoning trace with highest reward using an off-the-shelf ORM RLHFlow/Llama3.1-8B-PRM-Deepseek-Data (Xiong et al., 2024). We evaluate the performance of the last SFT checkpoint with highest Pass@1 versus the corresponding WiSE-FT variant with " + }, + { + "bbox": [ + 67, + 321, + 541, + 406 + ], + "type": "inline_equation", + "content": "\\delta = 1/2" + }, + { + "bbox": [ + 67, + 321, + 541, + 406 + ], + "type": "text", + "content": ". 
In Figure 3, we see that the performance gap on MATH500 between the final Gemma-2-2B SFT checkpoint and Wise-FT model widens with larger " + }, + { + "bbox": [ + 67, + 321, + 541, + 406 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 67, + 321, + 541, + 406 + ], + "type": "text", + "content": ". The WiSE-FT model achieves " + }, + { + "bbox": [ + 67, + 321, + 541, + 406 + ], + "type": "inline_equation", + "content": "5 - 7\\%" + }, + { + "bbox": [ + 67, + 321, + 541, + 406 + ], + "type": "text", + "content": " better performance with test-time scaling." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 436, + 541, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 436, + 541, + 568 + ], + "spans": [ + { + "bbox": [ + 67, + 436, + 541, + 568 + ], + "type": "text", + "content": "Better RL Scaling WiSE-FT's ability to achieve both high Pass@1 and Pass@K is particularly advantageous for continued RL training where models are further trained by policy gradient methods using self-generated data. In particular, WiSE-FT is able to generate data rich in learning signal (high Pass@1) while still having high coverage over the data space (high Pass@K). We continue training on rephrased training questions of GSM8K and MATH using GRPO paired with a binary reward of the correctness of the final guess. Across runs, we observe that continued RL training starting from the final WiSE-FT model improves performance more stably than finetuning starting from the final SFT checkpoint. Notably the final SFT checkpoint suffers low coverage over the data space, causing Pass@1 to improve slowly. We also try continued RL training from an earlier SFT checkpoint with peak Pass@4 performance. While RL scales better over the early SFT checkpoint in comparison to the final checkpoint, the performance still remains subpar compared to WiSE-FT." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 582, + 286, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 582, + 286, + 597 + ], + "spans": [ + { + "bbox": [ + 69, + 582, + 286, + 597 + ], + "type": "text", + "content": "4.1 General Purpose Reasoning Models" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 607, + 541, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 607, + 541, + 728 + ], + "spans": [ + { + "bbox": [ + 67, + 607, + 541, + 728 + ], + "type": "text", + "content": "So far we have studied the effect of WiSE-FT on models tuned on reasoning data for the same specific reasoning task (e.g., train on GSM8k and evaluate on GSM8k). We've additionally tested how well our findings generalize to models trained on general purpose reasoning datasets and tested on a out-of-distribution reasoning task. We take Qwen2.5-7B-Instruct and SFT for 5 epochs on OpenThoughts-114k, a high-quality synthetic dataset of math, science, and coding questions paired with DeepSeek-R1 completions, then evaluate its performance on AIME24 competition problems (with ASY code for figures from Muennighoff et al. (2025)). In this setting, the Pass@K trends during SFT on is more subtle. We still observe diversity collapse in Figure 12, but the affect is not strong enough for Pass@K to drop back down. However, we observe that the rate at which Pass@K improves for " + }, + { + "bbox": [ + 67, + 607, + 541, + 728 + ], + "type": "inline_equation", + "content": "K \\in \\{16,32\\}" + }, + { + "bbox": [ + 67, + 607, + 541, + 728 + ], + "type": "text", + "content": " slows down early while Pass@1 grows at a constant rate (Figure 10). 
We then perform WiSE-FT between the final and earlier checkpoint with" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "spans": [ + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 76, + 206, + 223 + ], + "blocks": [ + { + "bbox": [ + 72, + 76, + 206, + 223 + ], + "lines": [ + { + "bbox": [ + 72, + 76, + 206, + 223 + ], + "spans": [ + { + "bbox": [ + 72, + 76, + 206, + 223 + ], + "type": "image", + "image_path": "4784db23bc7951ee2fc6656f65f8c2d3d009a5771392f48ebe1e24aa859028e1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 209, + 77, + 337, + 223 + ], + "blocks": [ + { + "bbox": [ + 209, + 77, + 337, + 223 + ], + "lines": [ + { + "bbox": [ + 209, + 77, + 337, + 223 + ], + "spans": [ + { + "bbox": [ + 209, + 77, + 337, + 223 + ], + "type": "image", + "image_path": "281b7e3b04727e31eb48c9f9eb0dac923c0ed6c74f21659a0a4d939eab7dbcdc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 343, + 79, + 533, + 225 + ], + "blocks": [ + { + "bbox": [ + 343, + 79, + 533, + 225 + ], + "lines": [ + { + "bbox": [ + 343, + 79, + 533, + 225 + ], + "spans": [ + { + "bbox": [ + 343, + 79, + 533, + 225 + ], + "type": "image", + "image_path": 
"e3b0978da07922462da538218a0295754fb4a5a2ab33dcf01d466532d3e49fa5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 72, + 245, + 338, + 408 + ], + "blocks": [ + { + "bbox": [ + 198, + 231, + 212, + 243 + ], + "lines": [ + { + "bbox": [ + 198, + 231, + 212, + 243 + ], + "spans": [ + { + "bbox": [ + 198, + 231, + 212, + 243 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 72, + 245, + 338, + 408 + ], + "lines": [ + { + "bbox": [ + 72, + 245, + 338, + 408 + ], + "spans": [ + { + "bbox": [ + 72, + 245, + 338, + 408 + ], + "type": "image", + "image_path": "38159ef7ae6f51b78b1f51b27ac07d7019b9006e97ddbd5054b02d32b076acac.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 198, + 415, + 212, + 428 + ], + "lines": [ + { + "bbox": [ + 198, + 415, + 212, + 428 + ], + "spans": [ + { + "bbox": [ + 198, + 415, + 212, + 428 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 345, + 232, + 537, + 406 + ], + "blocks": [ + { + "bbox": [ + 345, + 232, + 537, + 406 + ], + "lines": [ + { + "bbox": [ + 345, + 232, + 537, + 406 + ], + "spans": [ + { + "bbox": [ + 345, + 232, + 537, + 406 + ], + "type": "image", + "image_path": "e8032220c0101b093c7abbf09b817da62b4a68f6faf283ab9fe582fdf70c1c5e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 435, + 414, + 447, + 426 + ], + "lines": [ + { + "bbox": [ + 435, + 414, + 447, + 426 + ], + "spans": [ + { + "bbox": [ + 435, + 414, + 447, + 426 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 436, + 542, + 510 + ], + "lines": [ + { + "bbox": [ + 67, + 436, + 542, + 510 + ], 
+ "spans": [ + { + "bbox": [ + 67, + 436, + 542, + 510 + ], + "type": "text", + "content": "Figure 3: Downstream Advantages of WiSE-FT: (a) Best@K on MATH500 of the final SFT Gemma2-2B checkpoint and its WiSE-FT counterpart. (b) Pass@K on AIME24 WiSE-FT after SFT on general purpose reasoning dataset OpenThoughts-114k achieves higher Pass@K on AIME24. (c) RL Scaling Gemma and Qwen SFT checkpoints further tuned by GRPO on GSM8K and MATH, respectively. RL from the final WiSE-FT model achieves higher Pass@1 with less data compared to GRPO starting from both early and late SFT checkpoints." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 530, + 541, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 530, + 541, + 556 + ], + "spans": [ + { + "bbox": [ + 67, + 530, + 541, + 556 + ], + "type": "text", + "content": "higher diversity. We choose early checkpoint at epoch 3 where improvements in Pass@K begin to slow. Similarly, we observe that WiSE-FT improves both Pass@1 and Pass@K in Figure 2." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 569, + 338, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 569, + 338, + 586 + ], + "spans": [ + { + "bbox": [ + 69, + 569, + 338, + 586 + ], + "type": "text", + "content": "5 Diversity Collapse during Finetuning" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 597, + 541, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 597, + 541, + 635 + ], + "spans": [ + { + "bbox": [ + 67, + 597, + 541, + 635 + ], + "type": "text", + "content": "In previous sections we alluded to the phenomenon where " + }, + { + "bbox": [ + 67, + 597, + 541, + 635 + ], + "type": "inline_equation", + "content": "\\mathrm{Pass}@\\mathrm{K}" + }, + { + "bbox": [ + 67, + 597, + 541, + 635 + ], + "type": "text", + "content": " decreases because SFT and RL induces diversity collapse in reasoning traces. To verify this hypothesis, we sample 100 traces per test GSM8k problem and measure diversity using three metrics:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 92, + 644, + 542, + 728 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 92, + 644, + 459, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 644, + 459, + 658 + ], + "spans": [ + { + "bbox": [ + 92, + 644, + 459, + 658 + ], + "type": "text", + "content": "1. Answer Diversity: The fraction of unique guesses " + }, + { + "bbox": [ + 92, + 644, + 459, + 658 + ], + "type": "inline_equation", + "content": "\\hat{y}" + }, + { + "bbox": [ + 92, + 644, + 459, + 658 + ], + "type": "text", + "content": " among reasoning traces." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 92, + 662, + 542, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 662, + 542, + 699 + ], + "spans": [ + { + "bbox": [ + 92, + 662, + 542, + 699 + ], + "type": "text", + "content": "2. 
Operation Diversity: The fraction of unique sequence of arithmetic operations performed among reasoning traces (In GSM8k, each intermediate step consists of a basic arithmetic operation, e.g. " + }, + { + "bbox": [ + 92, + 662, + 542, + 699 + ], + "type": "inline_equation", + "content": "5 + 3 = 8" + }, + { + "bbox": [ + 92, + 662, + 542, + 699 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 92, + 702, + 541, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 702, + 541, + 728 + ], + "spans": [ + { + "bbox": [ + 92, + 702, + 541, + 728 + ], + "type": "text", + "content": "3. Semantic Diversity: The average cosine similarity between the text embeddings of the reasoning traces, computed using Stella-400M-v5 (Zhang et al., 2024a)" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 747, + 309, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 747, + 309, + 756 + ], + "spans": [ + { + "bbox": [ + 302, + 747, + 309, + 756 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 99, + 95, + 210, + 200 + ], + "blocks": [ + { + "bbox": [ + 99, + 95, + 210, + 200 + ], + "lines": [ + { + "bbox": [ + 99, + 95, + 210, + 200 + ], + "spans": [ + { + "bbox": [ + 99, + 95, + 210, + 200 + ], + "type": "image", + "image_path": "97906774fe3390f2c8dce9d365178b77721d1991265e137bf86ad95237738532.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + 
}, + { + "bbox": [ + 68, + 214, + 541, + 240 + ], + "lines": [ + { + "bbox": [ + 68, + 214, + 541, + 240 + ], + "spans": [ + { + "bbox": [ + 68, + 214, + 541, + 240 + ], + "type": "text", + "content": "Figure 4: Diversity Collapse The answer, semantic, and operation diversity of Gemma-2-2B reasoning traces across GSM8k test examples. Colors map to different SFT checkpoints." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 216, + 95, + 329, + 200 + ], + "blocks": [ + { + "bbox": [ + 216, + 95, + 329, + 200 + ], + "lines": [ + { + "bbox": [ + 216, + 95, + 329, + 200 + ], + "spans": [ + { + "bbox": [ + 216, + 95, + 329, + 200 + ], + "type": "image", + "image_path": "c09374f5d22f90dca1bd1db2225d9246275d5d6c3a44de40384a8f4f0172de11.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 333, + 89, + 443, + 200 + ], + "blocks": [ + { + "bbox": [ + 246, + 76, + 364, + 87 + ], + "lines": [ + { + "bbox": [ + 246, + 76, + 364, + 87 + ], + "spans": [ + { + "bbox": [ + 246, + 76, + 364, + 87 + ], + "type": "text", + "content": "Diversity Across SFT " + }, + { + "bbox": [ + 246, + 76, + 364, + 87 + ], + "type": "inline_equation", + "content": "[T = 0.8]" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 333, + 89, + 443, + 200 + ], + "lines": [ + { + "bbox": [ + 333, + 89, + 443, + 200 + ], + "spans": [ + { + "bbox": [ + 333, + 89, + 443, + 200 + ], + "type": "image", + "image_path": "6b301ba7c226d983f2678b214edd7278f1cfb4320bcae13b40fbe1456b86da77.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 446, + 88, + 507, + 196 + ], + "blocks": [ + { + "bbox": [ + 446, + 88, + 507, + 196 + ], + "lines": [ + { + "bbox": [ + 446, + 88, + 507, + 196 + ], + "spans": [ + { + "bbox": [ + 446, + 
88, + 507, + 196 + ], + "type": "image", + "image_path": "0ca043e2ceef566089e4e9de31db5711daf3366c9e45fb0dd7cea3659355b3d9.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 72, + 256, + 541, + 387 + ], + "blocks": [ + { + "bbox": [ + 72, + 256, + 541, + 387 + ], + "lines": [ + { + "bbox": [ + 72, + 256, + 541, + 387 + ], + "spans": [ + { + "bbox": [ + 72, + 256, + 541, + 387 + ], + "type": "image", + "image_path": "33b82e197b5263aaec61d52cf001bea59054b3e26d2244a7f955f99bbd538652.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 394, + 541, + 443 + ], + "lines": [ + { + "bbox": [ + 67, + 394, + 541, + 443 + ], + "spans": [ + { + "bbox": [ + 67, + 394, + 541, + 443 + ], + "type": "text", + "content": "Figure 5: Pass@k for SFT and RL of Qwen-2.5-0.5B on GSM8K. The purple solid line measures Pass@K across SFT steps, while the dashed lines correspond to further training different checkpoints by Proximal Policy Optimization (PPO). While Pass@1 continues to improve, Pass@k for larger K can decrease even with RL." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 468, + 541, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 468, + 541, + 518 + ], + "spans": [ + { + "bbox": [ + 67, + 468, + 541, + 518 + ], + "type": "text", + "content": "As shown in Figure 4, we observe a stark trend where longer SFT on Gemma-2-2B incrementally suffers from clear diversity collapse across all diversity metrics. Specifically, the model places most of its probability mass not only on one particular guess, but on a single reasoning trace, as evidenced by the reduced semantic and operation diversity." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 529, + 440, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 529, + 440, + 544 + ], + "spans": [ + { + "bbox": [ + 68, + 529, + 440, + 544 + ], + "type": "text", + "content": "5.1 Theoretical Discussion of Diversity Collapse During SFT and RL" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 554, + 541, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 554, + 541, + 592 + ], + "spans": [ + { + "bbox": [ + 67, + 554, + 541, + 592 + ], + "type": "text", + "content": "We assess theoretically why diversity collapse tends to arise during SFT and RL training. Our analysis reveals that while SFT and RL operate on different principles, they share common pathways that lead to reduced generation diversity when optimizing for accuracy." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 619, + 541, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 619, + 541, + 727 + ], + "spans": [ + { + "bbox": [ + 67, + 619, + 541, + 727 + ], + "type": "text", + "content": "Diversity Collapse during SFT Overparameterized models are well-known to exhibit overconfidence in their predictions, an effect that has been studied extensively in classification (Guo et al., 2017). In particular, the model's confidence towards the most likely class " + }, + { + "bbox": [ + 67, + 619, + 541, + 727 + ], + "type": "inline_equation", + "content": "P(\\hat{y} = k_{\\max} \\mid x)" + }, + { + "bbox": [ + 67, + 619, + 541, + 727 + ], + "type": "text", + "content": " is often much higher than the model's accuracy. 
In binary classification with linear models " + }, + { + "bbox": [ + 67, + 619, + 541, + 727 + ], + "type": "inline_equation", + "content": "f(x) = \\sigma(\\langle \\boldsymbol{w}, \\boldsymbol{x} \\rangle)" + }, + { + "bbox": [ + 67, + 619, + 541, + 727 + ], + "type": "text", + "content": " and linearly separable training data, gradient descent provably drives the norm of the weights to infinity, causing probabilities to collapse to 0 or 1 (Soudry et al., 2018). We demonstrate this in linear models in Appendix A. A similar phenomenon likely arises in large reasoning models, which may also be prone to overfitting during SFT, ultimately leading to overly confident solutions in spite of limited coverage over the space of traces (Cobbe et al., 2021)." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 746, + 308, + 755 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 746, + 308, + 755 + ], + "spans": [ + { + "bbox": [ + 302, + 746, + 308, + 755 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 76, + 541, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 76, + 541, + 138 + ], + "spans": [ + { + "bbox": [ + 67, + 76, + 541, + 138 + ], + "type": "text", + "content": "Diversity Collapse during RL We further prove why applying reinforcement learning to a low-diversity policy yields suboptimal results—and sometimes even exacerbates diversity collapse—in a discrete bandit setting (see Figure 5). 
In this scenario, we assume there exist " + }, + { + "bbox": [ + 67, + 76, + 541, + 138 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 67, + 76, + 541, + 138 + ], + "type": "text", + "content": " equally good arms, corresponding to a set of successful strategies, and one bad arm that the policy should learn to avoid. We show two key results in this setting:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 154, + 542, + 312 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 69, + 154, + 542, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 154, + 542, + 228 + ], + "spans": [ + { + "bbox": [ + 69, + 154, + 542, + 228 + ], + "type": "text", + "content": "1. Implicit Collapse of Policy Diversity without KL Regularization. Our analysis demonstrates that when standard reinforcement learning algorithms—REINFORCE and GRPO—are applied without KL regularization, the training dynamics inevitably lead to a collapse in output diversity. Although multiple arms (actions) are equally optimal, the updates become self-enforcing as training progresses. Once one of the good arms is randomly reinforced, its probability increases at the expense of the others, ultimately driving the policy to converge on a single-arm strategy (Theorem C.1)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 238, + 542, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 238, + 542, + 312 + ], + "spans": [ + { + "bbox": [ + 69, + 238, + 542, + 312 + ], + "type": "text", + "content": "2. Diversity Does Not Increase with KL Regularization. When KL regularization is incorporated to constrain the divergence from the initial policy in REINFORCE, the final policy no longer collapses into a single-arm strategy. However, the diversity of the converged policy cannot exceed the initial diversity. 
Concretely, we show that the probability distribution over the good arms remains proportional to the initial distribution when the RL algorithm converges (Theorem C.8). This explains why initializing with a diverse policy is critical for the generalization of reinforcement learning." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 327, + 317, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 327, + 317, + 342 + ], + "spans": [ + { + "bbox": [ + 69, + 327, + 317, + 342 + ], + "type": "text", + "content": "6 Bias-Variance Tradeoff of Pass@K" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 361, + 542, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 361, + 542, + 411 + ], + "spans": [ + { + "bbox": [ + 67, + 361, + 542, + 411 + ], + "type": "text", + "content": "So far, we saw a mismatch in growth of Pass@1 and Pass@K during SFT and alluded to the impact of diversity collapse to Pass@K. We now formalize the relationship between Pass@1, Pass@K, and diversity collapse. Notably, we show that the upper bound of expected Pass@K over the test distribution can be decomposed into bias and variance quantities." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 426, + 400, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 426, + 400, + 440 + ], + "spans": [ + { + "bbox": [ + 68, + 426, + 400, + 440 + ], + "type": "text", + "content": "6.1 Diversity Collapse leads to Bimodal Pass@1 Distribution" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 452, + 542, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 452, + 542, + 501 + ], + "spans": [ + { + "bbox": [ + 67, + 452, + 542, + 501 + ], + "type": "text", + "content": "Consider the expected " + }, + { + "bbox": [ + 67, + 452, + 542, + 501 + ], + "type": "inline_equation", + "content": "\\mathrm{Pass}@\\mathrm{K}" + }, + { + "bbox": [ + 67, + 452, + 542, + 501 + ], + "type": "text", + "content": " over the entire test distribution " + }, + { + "bbox": [ + 67, + 452, + 542, + 501 + ], + "type": "inline_equation", + "content": "x, y \\sim \\mathcal{D}" + }, + { + "bbox": [ + 67, + 452, + 542, + 501 + ], + "type": "text", + "content": ". By Jensen's inequality, we can derive a straightforward upper bound of expected " + }, + { + "bbox": [ + 67, + 452, + 542, + 501 + ], + "type": "inline_equation", + "content": "\\mathrm{Pass}@\\mathrm{K}" + }, + { + "bbox": [ + 67, + 452, + 542, + 501 + ], + "type": "text", + "content": " that decomposes into the bias and variance of " + }, + { + "bbox": [ + 67, + 452, + 542, + 501 + ], + "type": "inline_equation", + "content": "1 - \\rho_x" + }, + { + "bbox": [ + 67, + 452, + 542, + 501 + ], + "type": "text", + "content": " (See proof in Appendix B). 
Note that the upper bound falls monotonically with larger bias and variance:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 93, + 524, + 460, + 555 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 524, + 460, + 555 + ], + "spans": [ + { + "bbox": [ + 93, + 524, + 460, + 555 + ], + "type": "interline_equation", + "content": "\\textbf {P r o p o s i t i o n 6 . 1 .} \\mathbb {E} _ {x, y \\sim \\mathcal {D}} [ \\operatorname {P a s s} @ \\mathrm {K} (x) ] \\leq 1 - ((\\underbrace {\\mathbb {E} _ {x , y \\sim \\mathcal {D}} [ 1 - \\rho_ {x} ]} _ {\\text {B i a s}}) ^ {2} + \\underbrace {\\operatorname {V a r} (\\rho_ {x})} _ {\\text {V a r i a n c e}}) ^ {k / 2}", + "image_path": "9ef913fae226c6c9e5f5118cb8299e0362f5d447174e6da0d58120967b130043.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 577, + 541, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 577, + 541, + 639 + ], + "spans": [ + { + "bbox": [ + 67, + 577, + 541, + 639 + ], + "type": "text", + "content": "In Figure 6b, we plot the distribution of error " + }, + { + "bbox": [ + 67, + 577, + 541, + 639 + ], + "type": "inline_equation", + "content": "1 - \\rho_{x}" + }, + { + "bbox": [ + 67, + 577, + 541, + 639 + ], + "type": "text", + "content": ", estimated using 100 sampled traces, over GSM8K test examples. We notice two trends with longer SFT. First, bias decreases, i.e., the expected error shifts towards 0. However, the distribution becomes increasingly bimodal with the densities converging towards the two extremes 0 and 1. As a result, the variance increases with longer SFT. This increase in variance directly explains the drop in Pass@k." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 643, + 542, + 730 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 643, + 542, + 730 + ], + "spans": [ + { + "bbox": [ + 67, + 643, + 542, + 730 + ], + "type": "text", + "content": "The bimodality of the " + }, + { + "bbox": [ + 67, + 643, + 542, + 730 + ], + "type": "inline_equation", + "content": "1 - \\rho_{x}" + }, + { + "bbox": [ + 67, + 643, + 542, + 730 + ], + "type": "text", + "content": " distribution means that the Pass@1 of any test problem is either very high or very low. Interestingly, one explanation for the increased bimodality of the distribution of " + }, + { + "bbox": [ + 67, + 643, + 542, + 730 + ], + "type": "inline_equation", + "content": "1 - \\rho_{x}" + }, + { + "bbox": [ + 67, + 643, + 542, + 730 + ], + "type": "text", + "content": " is in fact when models suffer from diversity collapse. In other words, a particular guess tends to be oversampled for each test problem. If the model places high probability on an incorrect guess, Pass@1 is very low. On the other hand, if the model places high probability on the correct guess, Pass@1 is very high. We illustrate this relationship in Figure 6a. All in all, Pass@K can be improved in two ways - either reduce bias by improving Pass@1 or reduce variance by increasing diversity."
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "spans": [ + { + "bbox": [ + 302, + 746, + 309, + 756 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 71, + 75, + 542, + 198 + ], + "blocks": [ + { + "bbox": [ + 71, + 75, + 542, + 198 + ], + "lines": [ + { + "bbox": [ + 71, + 75, + 542, + 198 + ], + "spans": [ + { + "bbox": [ + 71, + 75, + 542, + 198 + ], + "type": "image", + "image_path": "105ec9b2f770f3fd0623182efd6b919cba9dbb10c81c24b7a52fedf2e526985a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 299, + 201, + 312, + 213 + ], + "lines": [ + { + "bbox": [ + 299, + 201, + 312, + 213 + ], + "spans": [ + { + "bbox": [ + 299, + 201, + 312, + 213 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 71, + 215, + 544, + 361 + ], + "blocks": [ + { + "bbox": [ + 71, + 215, + 544, + 361 + ], + "lines": [ + { + "bbox": [ + 71, + 215, + 544, + 361 + ], + "spans": [ + { + "bbox": [ + 71, + 215, + 544, + 361 + ], + "type": "image", + "image_path": "401631f09a461d1e42f689302a470ca43729d0d0654d63edcc98eccaf75cd6ba.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 298, + 366, + 313, + 379 + ], + "lines": [ + { + "bbox": [ + 298, + 366, + 313, + 379 + ], + 
"spans": [ + { + "bbox": [ + 298, + 366, + 313, + 379 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 68, + 388, + 544, + 437 + ], + "lines": [ + { + "bbox": [ + 68, + 388, + 544, + 437 + ], + "spans": [ + { + "bbox": [ + 68, + 388, + 544, + 437 + ], + "type": "text", + "content": "Figure 6: Histogram of error " + }, + { + "bbox": [ + 68, + 388, + 544, + 437 + ], + "type": "inline_equation", + "content": "1 - \\rho_{x}" + }, + { + "bbox": [ + 68, + 388, + 544, + 437 + ], + "type": "text", + "content": " of Gemma-2-2B SFT checkpoints across GSM8k test. SFT progressively decreases bias but increases variance of error i.e., " + }, + { + "bbox": [ + 68, + 388, + 544, + 437 + ], + "type": "inline_equation", + "content": "1 - \\mathrm{Pass}@1" + }, + { + "bbox": [ + 68, + 388, + 544, + 437 + ], + "type": "text", + "content": ", across the test distribution, causing Pass@K to fall. Applying WiSE-FT reduces both bias and variance, but temperature scaling trades off decreasing variance with increased bias." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 457, + 258, + 472 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 457, + 258, + 472 + ], + "spans": [ + { + "bbox": [ + 69, + 457, + 258, + 472 + ], + "type": "text", + "content": "6.2 WiSE-FT vs. Diverse Decoding" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 480, + 543, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 480, + 543, + 626 + ], + "spans": [ + { + "bbox": [ + 67, + 480, + 543, + 626 + ], + "type": "text", + "content": "While we've proposed inducing diversity by WiSE-FT, another common alternative for inducing diversity is temperature scaling the logits. High temperature smoothens the logits allowing the model to more likely sample low probability tokens. 
In Figure 1, we see that while high temperatures indeed improve Pass@K, the Pass@K at any SFT timestep notably never reaches the Pass@K of our final WiSE-FT model. If temperature scaling also increases diversity, why does WiSE-FT strictly outperform sampling with high temperature? In Figure 6b, we plot the distribution of " + }, + { + "bbox": [ + 67, + 480, + 543, + 626 + ], + "type": "inline_equation", + "content": "1 - \\rho_{x}" + }, + { + "bbox": [ + 67, + 480, + 543, + 626 + ], + "type": "text", + "content": " if we sample from the last SFT checkpoint with high temperature " + }, + { + "bbox": [ + 67, + 480, + 543, + 626 + ], + "type": "inline_equation", + "content": "T = 1.5" + }, + { + "bbox": [ + 67, + 480, + 543, + 626 + ], + "type": "text", + "content": ". As expected, we see that the model reasons more diversely. This smoothens the bimodal peaks and reduces the variance. However, the average accuracy of the model generations also degrades, causing the bias to go back up. We suspect bias-variance tradeoff is inherent in diversity-inducing decoding approaches. For example, min-p (Nguyen et al., 2024) combines temperature scaling with adaptive thresholding to not sample outlier tokens. However, this additional control is unable to reduce bias (Figure 16). Surprisingly, WiSE-FT uniquely manages to reduce both bias and variance."
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 638, + 164, + 652 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 638, + 164, + 652 + ], + "spans": [ + { + "bbox": [ + 69, + 638, + 164, + 652 + ], + "type": "text", + "content": "7 Discussion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 666, + 543, + 730 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 666, + 543, + 730 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 543, + 730 + ], + "type": "text", + "content": "In this work, we investigated the phenomenon of diversity collapse during the training of reasoning models. Our analysis reveals that standard SFT and RL pipelines can deteriorate in Pass@ " + }, + { + "bbox": [ + 67, + 666, + 543, + 730 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 67, + 666, + 543, + 730 + ], + "type": "text", + "content": " due to the convergence of model generations toward a single reasoning trace. We demonstrated that WiSE-FT, which interpolates between early and late SFT checkpoints, significantly improves both Pass@1 and Pass@ " + }, + { + "bbox": [ + 67, + 666, + 543, + 730 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 67, + 666, + 543, + 730 + ], + "type": "text", + "content": " across multiple math datasets and model scales. 
This is unlike alternative approaches such as temperature scaling or early" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 746, + 309, + 755 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 746, + 309, + 755 + ], + "spans": [ + { + "bbox": [ + 302, + 746, + 309, + 755 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 76, + 543, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 76, + 543, + 113 + ], + "spans": [ + { + "bbox": [ + 67, + 76, + 543, + 113 + ], + "type": "text", + "content": "stopping, which face an inherent tradeoff. Furthermore, improving on these metrics corresponded with better adaptation to test-time scaling and RL. But other limitations of WiSE-FT may exist at larger scale, which we leave for future work." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 118, + 362, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 118, + 362, + 228 + ], + "spans": [ + { + "bbox": [ + 67, + 118, + 362, + 228 + ], + "type": "text", + "content": "Overall, our work reveals the importance of maintaining diversity in reasoning models. Current decoding strategies (e.g., min-p, nucleus, and top-k) are still unable to fully extract a model's capabilities. 
We estimate that a significant gap, of tens of percent, remains compared to the optimal decoding strategy for Pass@K, i.e., top-K sampling over the model's marginal answer distribution " + }, + { + "bbox": [ + 67, + 118, + 362, + 228 + ], + "type": "inline_equation", + "content": "P(\\hat{y} \\mid x)" + }, + { + "bbox": [ + 67, + 118, + 362, + 228 + ], + "type": "text", + "content": " (see Table 1 and Appendix G). We encourage future works to address downstream limitations more carefully in earlier stages of the training pipeline." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 383, + 131, + 529, + 196 + ], + "blocks": [ + { + "bbox": [ + 383, + 131, + 529, + 196 + ], + "lines": [ + { + "bbox": [ + 383, + 131, + 529, + 196 + ], + "spans": [ + { + "bbox": [ + 383, + 131, + 529, + 196 + ], + "type": "table", + "html": "
MethodPass@2Pass@4
Nucleus0.570.67
Min-p0.570.67
Top-k0.560.67
Optimal0.760.83
", + "image_path": "268a4214b18b790ae18f4c66932eb03706a3662c1ff2bbd2235424d0bcd92783.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 366, + 203, + 544, + 229 + ], + "lines": [ + { + "bbox": [ + 366, + 203, + 544, + 229 + ], + "spans": [ + { + "bbox": [ + 366, + 203, + 544, + 229 + ], + "type": "text", + "content": "Table 1: Best Pass@k of Gemma on GSM8k across SFT checkpoints" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 69, + 239, + 223, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 239, + 223, + 257 + ], + "spans": [ + { + "bbox": [ + 69, + 239, + 223, + 257 + ], + "type": "text", + "content": "8 Acknowledgements" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 268, + 542, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 268, + 542, + 330 + ], + "spans": [ + { + "bbox": [ + 67, + 268, + 542, + 330 + ], + "type": "text", + "content": "We'd like to thank Aviral Kumar, Sean Welleck, Amrith Setlur and Yiding Jiang for insightful discussions about test-time scaling and reinforcement learning. We'd also like to thank Alex Li, Sachin Goyal, and Jacob Springer for their meaningful contribution to our figures and literature review. We gratefully acknowledge support from Apple, Google, Cisco, OpenAI, NSF, Okawa foundation, the AI2050 program at Schmidt Sciences (Grant #G2264481), and Bosch Center for AI." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 341, + 144, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 341, + 144, + 357 + ], + "spans": [ + { + "bbox": [ + 69, + 341, + 144, + 357 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 370, + 542, + 728 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 70, + 370, + 542, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 370, + 542, + 407 + ], + "spans": [ + { + "bbox": [ + 70, + 370, + 542, + 407 + ], + "type": "text", + "content": "AlphaProof and AlphaGeometry teams. Ai achieves silver-medal standard solving international mathematical olympiad problems, jul 2024. URL https://deepmind.google/discover/blog/ai-solves-imo-problems-at-silver-medal-level/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 415, + 542, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 415, + 542, + 441 + ], + "spans": [ + { + "bbox": [ + 70, + 415, + 542, + 441 + ], + "type": "text", + "content": "Edward Beeching, Lewis Tunstall, and Sasha Rush. Scaling test-time compute with open models, 2024. URL https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 447, + 542, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 447, + 542, + 472 + ], + "spans": [ + { + "bbox": [ + 70, + 447, + 542, + 472 + ], + "type": "text", + "content": "Jeff Bilmes. Submodularity in machine learning and artificial intelligence. arXiv preprint arXiv:2202.00132, 2022." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 479, + 542, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 479, + 542, + 516 + ], + "spans": [ + { + "bbox": [ + 70, + 479, + 542, + 516 + ], + "type": "text", + "content": "Feng Chen, Allan Raventos, Nan Cheng, Surya Ganguli, and Shaul Druckmann. Rethinking fine-tuning when scaling test-time compute: Limiting confidence improves mathematical reasoning. arXiv preprint arXiv:2502.07154, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 525, + 541, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 525, + 541, + 550 + ], + "spans": [ + { + "bbox": [ + 69, + 525, + 541, + 550 + ], + "type": "text", + "content": "Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Alphamath almost zero: Process supervision without process, 2024. URL https://arxiv.org/abs/2405.03553." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 556, + 542, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 556, + 542, + 594 + ], + "spans": [ + { + "bbox": [ + 69, + 556, + 542, + 594 + ], + "type": "text", + "content": "Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for best-of-n sampling in large language models. arXiv preprint arXiv:2412.15287, 2024a." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 601, + 542, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 601, + 542, + 639 + ], + "spans": [ + { + "bbox": [ + 69, + 601, + 542, + 639 + ], + "type": "text", + "content": "Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for best-of-n sampling in large language models, 2024b. 
URL https://arxiv.org/abs/2412.15287." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 646, + 542, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 646, + 542, + 684 + ], + "spans": [ + { + "bbox": [ + 69, + 646, + 542, + 684 + ], + "type": "text", + "content": "Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V. Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training, 2025. URL https://arxiv.org/abs/2501.17161." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 690, + 542, + 728 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 690, + 542, + 728 + ], + "spans": [ + { + "bbox": [ + 69, + 690, + 542, + 728 + ], + "type": "text", + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems, 2021. URL https://arxiv.org/abs/2110.14168." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 746, + 311, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 746, + 311, + 756 + ], + "spans": [ + { + "bbox": [ + 300, + 746, + 311, + 756 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 76, + 541, + 727 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 70, + 76, + 541, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 76, + 541, + 102 + ], + "spans": [ + { + "bbox": [ + 70, + 76, + 541, + 102 + ], + "type": "text", + "content": "Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q Weinberger. On calibration of modern neural networks. In International conference on machine learning, pp. 1321-1330. PMLR, 2017." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 108, + 541, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 108, + 541, + 144 + ], + "spans": [ + { + "bbox": [ + 70, + 108, + 541, + 144 + ], + "type": "text", + "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 152, + 541, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 152, + 541, + 187 + ], + "spans": [ + { + "bbox": [ + 70, + 152, + 541, + 187 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset, 2021. URL https://arxiv.org/abs/2103.03874." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 194, + 541, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 194, + 541, + 219 + ], + "spans": [ + { + "bbox": [ + 70, + 194, + 541, + 219 + ], + "type": "text", + "content": "Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. The curious case of neural text degeneration, 2020. URL https://arxiv.org/abs/1904.09751." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 225, + 541, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 225, + 541, + 262 + ], + "spans": [ + { + "bbox": [ + 70, + 225, + 541, + 262 + ], + "type": "text", + "content": "Audrey Huang, Adam Block, Dylan J Foster, Dhruv Rohatgi, Cyril Zhang, Max Simchowitz, Jordan T Ash, and Akshay Krishnamurthy. Self-improvement in language models: The sharpening mechanism. arXiv preprint arXiv:2412.01951, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 270, + 541, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 270, + 541, + 305 + ], + "spans": [ + { + "bbox": [ + 70, + 270, + 541, + 305 + ], + "type": "text", + "content": "Yifei Li, Zeqi Lin, Shizhuo Zhang, Qiang Fu, Bei Chen, Jian-Guang Lou, and Weizhu Chen. Making large language models better reasoners with step-aware verifier, 2023. URL https://arxiv.org/abs/2206.02336." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 312, + 541, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 312, + 541, + 350 + ], + "spans": [ + { + "bbox": [ + 70, + 312, + 541, + 350 + ], + "type": "text", + "content": "Ziniu Li, Congliang Chen, Tian Xu, Zeyu Qin, Jiancong Xiao, Zhi-Quan Luo, and Ruoyu Sun. Preserving diversity in supervised fine-tuning of large language models. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=NQEe7B7bSw." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 356, + 541, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 356, + 541, + 381 + ], + "spans": [ + { + "bbox": [ + 69, + 356, + 541, + 381 + ], + "type": "text", + "content": "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. arXiv preprint arXiv:2305.20050, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 388, + 541, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 388, + 541, + 423 + ], + "spans": [ + { + "bbox": [ + 70, + 388, + 541, + 423 + ], + "type": "text", + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 430, + 541, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 430, + 541, + 466 + ], + "spans": [ + { + "bbox": [ + 70, + 430, + 541, + 466 + ], + "type": "text", + "content": "Minh Nguyen, Andrew Baker, Clement Neo, Allen Roush, Andreas Kirsch, and Ravid Shwartz-Ziv. Turning up the heat: Min-p sampling for creative and coherent llm outputs, 2024. 
URL https://arxiv.org/abs/2407.01082." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 474, + 541, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 474, + 541, + 499 + ], + "spans": [ + { + "bbox": [ + 69, + 474, + 541, + 499 + ], + "type": "text", + "content": "Theo X. Olausson, Jeevana Priya Inala, Chenglong Wang, Jianfeng Gao, and Armando Solar-Lezama. Is self-repair a silver bullet for code generation?, 2024. URL https://arxiv.org/abs/2306.09896." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 506, + 541, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 506, + 541, + 566 + ], + "spans": [ + { + "bbox": [ + 70, + 506, + 541, + 566 + ], + "type": "text", + "content": "Pier Giuseppe Sessa, Robert Dadashi, Léonard Hussenot, Johan Ferret, Nino Vieillard, Alexandre Ramé, Bobak Shariari, Sarah Perrin, Abe Friesen, Geoffrey Cideron, Sertan Girgin, Piotr Stanczyk, Andrea Michi, Danila Sinopalnikov, Sabela Ramos, Amélie Héliou, Aliaksei Severyn, Matt Hoffman, Nikola Momchev, and Olivier Bachem. Bond: Aligning llms with best-of-n distillation, 2024. URL https://arxiv.org/abs/2407.14622." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 572, + 541, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 572, + 541, + 609 + ], + "spans": [ + { + "bbox": [ + 69, + 572, + 541, + 609 + ], + "type": "text", + "content": "Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for llm reasoning, 2024. URL https://arxiv.org/abs/2410.08146." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 616, + 541, + 652 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 616, + 541, + 652 + ], + "spans": [ + { + "bbox": [ + 69, + 616, + 541, + 652 + ], + "type": "text", + "content": "Louis Shao, Stephan Gouws, Denny Britz, Anna Goldie, Brian Strope, and Ray Kurzweil. Generating high-quality and informative conversation responses with sequence-to-sequence models. arXiv preprint arXiv:1701.03185, 2017." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 659, + 541, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 659, + 541, + 685 + ], + "spans": [ + { + "bbox": [ + 69, + 659, + 541, + 685 + ], + "type": "text", + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 690, + 541, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 690, + 541, + 727 + ], + "spans": [ + { + "bbox": [ + 69, + 690, + 541, + 727 + ], + "type": "text", + "content": "Yuda Song, Hanlin Zhang, Carson Eisenach, Sham Kakade, Dean Foster, and Udaya Ghai. Mind the gap: Examining the self-improvement capabilities of large language models. arXiv preprint arXiv:2412.02674, 2024." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 747, + 310, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 747, + 310, + 756 + ], + "spans": [ + { + "bbox": [ + 300, + 747, + 310, + 756 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 76, + 541, + 449 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 70, + 76, + 541, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 76, + 541, + 102 + ], + "spans": [ + { + "bbox": [ + 70, + 76, + 541, + 102 + ], + "type": "text", + "content": "Daniel Soudry, Elad Hoffer, Mor Shpigel Nacson, Suriya Gunasekar, and Nathan Srebro. The implicit bias of gradient descent on separable data. Journal of Machine Learning Research, 19(70):1-57, 2018." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 108, + 541, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 108, + 541, + 144 + ], + "spans": [ + { + "bbox": [ + 70, + 108, + 541, + 144 + ], + "type": "text", + "content": "Ashwin K Vijayakumar, Michael Cogswell, Ramprasath R. Selvaraju, Qing Sun, Stefan Lee, David Crandall, and Dhruv Batra. Diverse beam search: Decoding diverse solutions from neural sequence models, 2018. URL https://arxiv.org/abs/1610.02424." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 152, + 541, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 152, + 541, + 189 + ], + "spans": [ + { + "bbox": [ + 70, + 152, + 541, + 189 + ], + "type": "text", + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models, 2023. URL https://arxiv.org/abs/2203.11171." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 196, + 541, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 196, + 541, + 233 + ], + "spans": [ + { + "bbox": [ + 70, + 196, + 541, + 233 + ], + "type": "text", + "content": "Mitchell Wortsman, Gabriel Ilharco, Jong Wook Kim, Mike Li, Simon Kornblith, Rebecca Roelofs, Raphael Gontijo-Lopes, Hannaneh Hajishirzi, Ali Farhadi, Hongseok Namkoong, and Ludwig Schmidt. Robust fine-tuning of zero-shot models, 2022. URL https://arxiv.org/abs/2109.01903." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 239, + 541, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 239, + 541, + 277 + ], + "spans": [ + { + "bbox": [ + 70, + 239, + 541, + 277 + ], + "type": "text", + "content": "Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. arXiv preprint arXiv:2408.00724, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 284, + 541, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 284, + 541, + 309 + ], + "spans": [ + { + "bbox": [ + 69, + 284, + 541, + 309 + ], + "type": "text", + "content": "Wei Xiong, Hanning Zhang, Nan Jiang, and Tong Zhang. An implementation of generative prm. https://github.com/RLHFlow/RLHF-Reward-Modeling, 2024." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 316, + 541, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 316, + 541, + 341 + ], + "spans": [ + { + "bbox": [ + 70, + 316, + 541, + 341 + ], + "type": "text", + "content": "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. Demystifying long chain-of-thought reasoning in llms, 2025. URL https://arxiv.org/abs/2502.03373." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 347, + 541, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 347, + 541, + 384 + ], + "spans": [ + { + "bbox": [ + 70, + 347, + 541, + 384 + ], + "type": "text", + "content": "Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. arXiv preprint arXiv:2309.12284, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 391, + 541, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 391, + 541, + 417 + ], + "spans": [ + { + "bbox": [ + 70, + 391, + 541, + 417 + ], + "type": "text", + "content": "Dun Zhang, Jiacheng Li, Ziyang Zeng, and Fulong Wang. Jasper and stella: distillation of sota embedding models. arXiv preprint arXiv:2412.19048, 2024a." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 422, + 541, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 422, + 541, + 449 + ], + "spans": [ + { + "bbox": [ + 70, + 422, + 541, + 449 + ], + "type": "text", + "content": "Yiming Zhang, Avi Schwarzschild, Nicholas Carlini, Zico Kolter, and Daphne Ippolito. Forcing diffuse distributions out of language models, 2024b. URL https://arxiv.org/abs/2404.10859." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 746, + 310, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 746, + 310, + 756 + ], + "spans": [ + { + "bbox": [ + 300, + 746, + 310, + 756 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 76, + 280, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 76, + 280, + 92 + ], + "spans": [ + { + "bbox": [ + 69, + 76, + 280, + 92 + ], + "type": "text", + "content": "A SFT in Binary Classification" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 110, + 541, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 110, + 541, + 135 + ], + "spans": [ + { + "bbox": [ + 67, + 110, + 541, + 135 + ], + "type": "text", + "content": "Data and Model Setup We train a linear classifier " + }, + { + "bbox": [ + 67, + 110, + 541, + 135 + ], + "type": "inline_equation", + "content": "f(\\pmb{x}) = \\langle \\pmb{w}, \\pmb{x} \\rangle" + }, + { + "bbox": [ + 67, + 110, + 541, + 135 + ], + "type": "text", + "content": " from random initialization over a binary Gaussian mixture distribution:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 259, + 147, + 541, + 163 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 259, + 147, + 541, + 163 + ], + "spans": [ + { + "bbox": [ + 259, + 147, + 541, + 163 + ], + "type": "interline_equation", + "content": "x \\mid y \\sim \\mathcal {N} (y \\boldsymbol 
{\\mu}, I ^ {d \\times d}) \\tag {3}", + "image_path": "e6835df7d8541ad8ff95d0e959e32537072541d9347a870c518954a0ff19d3c8.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 252, + 165, + 541, + 178 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 165, + 541, + 178 + ], + "spans": [ + { + "bbox": [ + 252, + 165, + 541, + 178 + ], + "type": "interline_equation", + "content": "y \\in \\{1, - 1 \\} \\text {u n i f o r m l y} \\tag {4}", + "image_path": "c8372a03f4c6999a7525a65022b672de503002d5509ddfff70a3d911541e6379.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 191, + 542, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 191, + 542, + 231 + ], + "spans": [ + { + "bbox": [ + 67, + 191, + 542, + 231 + ], + "type": "text", + "content": "Given a model, we sample predictions, namely " + }, + { + "bbox": [ + 67, + 191, + 542, + 231 + ], + "type": "inline_equation", + "content": "\\hat{y} = 1" + }, + { + "bbox": [ + 67, + 191, + 542, + 231 + ], + "type": "text", + "content": " with probability " + }, + { + "bbox": [ + 67, + 191, + 542, + 231 + ], + "type": "inline_equation", + "content": "\\sigma (\\langle \\pmb {w},\\pmb {x}\\rangle) = (1 + \\exp (-\\langle \\pmb {w},\\pmb {x}\\rangle))^{-1}" + }, + { + "bbox": [ + 67, + 191, + 542, + 231 + ], + "type": "text", + "content": ", or " + }, + { + "bbox": [ + 67, + 191, + 542, + 231 + ], + "type": "inline_equation", + "content": "\\hat{y} = 0" + }, + { + "bbox": [ + 67, + 191, + 542, + 231 + ], + "type": "text", + "content": ". Then, per-example Pass@1 is equal to " + }, + { + "bbox": [ + 67, + 191, + 542, + 231 + ], + "type": "inline_equation", + "content": "\\rho_{x} = \\sigma (y\\cdot \\langle \\pmb {w},\\pmb {x}\\rangle)" + }, + { + "bbox": [ + 67, + 191, + 542, + 231 + ], + "type": "text", + "content": ". 
Similarly, the expected Pass@k is equal to " + }, + { + "bbox": [ + 67, + 191, + 542, + 231 + ], + "type": "inline_equation", + "content": "1 - (1 - \\rho_{x})^{k}" + }, + { + "bbox": [ + 67, + 191, + 542, + 231 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "spans": [ + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "type": "text", + "content": "In our experiment, we train an overparametrized linear classifier over binary Gaussian data mixture " + }, + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "type": "inline_equation", + "content": "x \\mid y \\sim \\mathcal{N}(y \\cdot \\frac{1}{\\sqrt{d}} \\mathbf{1}, \\frac{1}{2} I)" + }, + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "type": "inline_equation", + "content": "y = \\{-1, 1\\}" + }, + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "type": "inline_equation", + "content": "d = 1000" + }, + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "type": "text", + "content": ". We then evaluate " + }, + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "type": "inline_equation", + "content": "\\rho_x" + }, + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "type": "text", + "content": " of 400 test samples. 
As training progresses, the distribution of " + }, + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "type": "inline_equation", + "content": "\\rho_x" + }, + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "type": "text", + "content": " over the test data becomes bimodal due to the norm of " + }, + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 67, + 235, + 416, + 324 + ], + "type": "text", + "content": " monotonically increasing once it separates the training examples. Similarly, we observe that this leads to a drop in Pass@k while Pass@1 continues to improve." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 424, + 229, + 537, + 320 + ], + "blocks": [ + { + "bbox": [ + 424, + 229, + 537, + 320 + ], + "lines": [ + { + "bbox": [ + 424, + 229, + 537, + 320 + ], + "spans": [ + { + "bbox": [ + 424, + 229, + 537, + 320 + ], + "type": "image", + "image_path": "110fab68254d7edd76626c12dee15bae4c5510f3d1620d88c62cdb6cd3e849b2.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 80, + 360, + 167, + 445 + ], + "blocks": [ + { + "bbox": [ + 80, + 360, + 167, + 445 + ], + "lines": [ + { + "bbox": [ + 80, + 360, + 167, + 445 + ], + "spans": [ + { + "bbox": [ + 80, + 360, + 167, + 445 + ], + "type": "image", + "image_path": "136c97bd309561e7ca54a0ca2069ad3f0b521147a93b614b1fb712de45f0c740.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 173, + 459, + 437, + 472 + ], + "lines": [ + { + "bbox": [ + 173, + 459, + 437, + 472 + ], + "spans": [ + { + "bbox": [ + 173, + 459, + 437, + 472 + ], + "type": "text", + "content": "Figure 8: Pass@k across Training in Binary Classification" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 171, + 361, + 258, + 444 + ], + "blocks": [ + { + 
"bbox": [ + 171, + 361, + 258, + 444 + ], + "lines": [ + { + "bbox": [ + 171, + 361, + 258, + 444 + ], + "spans": [ + { + "bbox": [ + 171, + 361, + 258, + 444 + ], + "type": "image", + "image_path": "235ace59d9f3a9fd4138d33eda84ec30aa842d50403a6325e09b14c22038f792.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 263, + 361, + 350, + 444 + ], + "blocks": [ + { + "bbox": [ + 263, + 361, + 350, + 444 + ], + "lines": [ + { + "bbox": [ + 263, + 361, + 350, + 444 + ], + "spans": [ + { + "bbox": [ + 263, + 361, + 350, + 444 + ], + "type": "image", + "image_path": "bd859f21719d1d07b48f5e32d3cf6033dc8039de333fc86dee2565d05cfa3961.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 352, + 361, + 440, + 444 + ], + "blocks": [ + { + "bbox": [ + 425, + 329, + 536, + 342 + ], + "lines": [ + { + "bbox": [ + 425, + 329, + 536, + 342 + ], + "spans": [ + { + "bbox": [ + 425, + 329, + 536, + 342 + ], + "type": "text", + "content": "Figure 7: Weight Norm" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 352, + 361, + 440, + 444 + ], + "lines": [ + { + "bbox": [ + 352, + 361, + 440, + 444 + ], + "spans": [ + { + "bbox": [ + 352, + 361, + 440, + 444 + ], + "type": "image", + "image_path": "3b4b375b012d324a664828356e9ada89ab2d796bd0c978efdd3c6ccc0233b487.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 446, + 361, + 533, + 444 + ], + "blocks": [ + { + "bbox": [ + 446, + 361, + 533, + 444 + ], + "lines": [ + { + "bbox": [ + 446, + 361, + 533, + 444 + ], + "spans": [ + { + "bbox": [ + 446, + 361, + 533, + 444 + ], + "type": "image", + "image_path": "09a06b40fa4ea2f11c53a1fb0b61297195528e6be636a1b05e4aa3162e56bcea.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 76, + 509, + 167, + 593 + ], + "blocks": [ + { + "bbox": [ + 76, + 509, + 167, + 593 + ], + "lines": [ + { + "bbox": [ + 76, + 509, + 167, + 593 + ], + "spans": [ + { + "bbox": [ + 76, + 509, + 167, + 593 + ], + "type": "image", + "image_path": "672c0c412effe3f158ea40afc7de6980e89697915f6ff25df8aa79757cb6e4a2.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 196, + 607, + 414, + 621 + ], + "lines": [ + { + "bbox": [ + 196, + 607, + 414, + 621 + ], + "spans": [ + { + "bbox": [ + 196, + 607, + 414, + 621 + ], + "type": "text", + "content": "Figure 9: Histogram of " + }, + { + "bbox": [ + 196, + 607, + 414, + 621 + ], + "type": "inline_equation", + "content": "\\rho_{x}" + }, + { + "bbox": [ + 196, + 607, + 414, + 621 + ], + "type": "text", + "content": " across training steps" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 169, + 508, + 257, + 592 + ], + "blocks": [ + { + "bbox": [ + 169, + 508, + 257, + 592 + ], + "lines": [ + { + "bbox": [ + 169, + 508, + 257, + 592 + ], + "spans": [ + { + "bbox": [ + 169, + 508, + 257, + 592 + ], + "type": "image", + "image_path": "9314a20b371980d52ecbb3503a73f93a81b6bb2a673d0623309bc41fbb73253e.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 261, + 508, + 350, + 592 + ], + "blocks": [ + { + "bbox": [ + 261, + 508, + 350, + 592 + ], + "lines": [ + { + "bbox": [ + 261, + 508, + 350, + 592 + ], + "spans": [ + { + "bbox": [ + 261, + 508, + 350, + 592 + ], + "type": "image", + "image_path": "525268009e6075c1325653ae3ac1f4d2d550342360d9da06d7710798d251f1d6.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 354, + 508, + 442, + 592 + ], + "blocks": [ 
+ { + "bbox": [ + 354, + 508, + 442, + 592 + ], + "lines": [ + { + "bbox": [ + 354, + 508, + 442, + 592 + ], + "spans": [ + { + "bbox": [ + 354, + 508, + 442, + 592 + ], + "type": "image", + "image_path": "5a57a6efe0c941d086713a9c86373744f480001ca15dc55fec49bde54b75585e.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 446, + 508, + 535, + 593 + ], + "blocks": [ + { + "bbox": [ + 446, + 508, + 535, + 593 + ], + "lines": [ + { + "bbox": [ + 446, + 508, + 535, + 593 + ], + "spans": [ + { + "bbox": [ + 446, + 508, + 535, + 593 + ], + "type": "image", + "image_path": "b1e638e906973e22c65e08bce79f7b716ac0fa8700587808cf2a8fb6a77abce6.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 650, + 211, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 650, + 211, + 666 + ], + "spans": [ + { + "bbox": [ + 69, + 650, + 211, + 666 + ], + "type": "text", + "content": "B Expected Pass@k" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 69, + 683, + 152, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 683, + 152, + 696 + ], + "spans": [ + { + "bbox": [ + 69, + 683, + 152, + 696 + ], + "type": "text", + "content": "Proposition B.1." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 162, + 708, + 448, + 726 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 708, + 448, + 726 + ], + "spans": [ + { + "bbox": [ + 162, + 708, + 448, + 726 + ], + "type": "interline_equation", + "content": "\\mathbb {E} _ {x, y \\sim \\mathcal {D}} \\left[ \\mathrm {P a s s @ K} (x) \\right] \\leq 1 - \\left(\\left(\\mathbb {E} _ {x, y \\sim \\mathcal {D}} [ 1 - \\rho_ {x} ]\\right) ^ {2} + \\mathrm {V a r} (\\rho_ {x})\\right) ^ {k / 2}", + "image_path": "eb4eea5cbee71f18b652ea445b4eb335b63d414b6e7e2030b948332fc745a373.jpg" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 266, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 266, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 266, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 746, + 311, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 746, + 311, + 756 + ], + "spans": [ + { + "bbox": [ + 300, + 746, + 311, + 756 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 76, + 99, + 89 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 76, + 99, + 89 + ], + "spans": [ + { + "bbox": [ + 69, + 76, + 99, + 89 + ], + "type": "text", + "content": "Proof." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 150, + 95, + 541, + 186 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 95, + 541, + 186 + ], + "spans": [ + { + "bbox": [ + 150, + 95, + 541, + 186 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} \\left[ (1 - \\rho_ {x}) ^ {k} \\right] \\geq \\mathbb {E} \\left[ (1 - \\rho_ {X}) ^ {2} \\right] ^ {k / 2} (5) \\\\ = \\left(1 - 2 \\mathbb {E} \\left[ \\rho_ {x} \\right] + \\mathbb {E} \\left[ \\rho_ {x} ^ {2} \\right]\\right) ^ {k / 2} (6) \\\\ = \\left(\\left(1 - 2 \\mathbb {E} [ \\rho_ {x} ] + \\mathbb {E} [ \\rho_ {x} ] ^ {2}\\right) + \\left(\\mathbb {E} \\left[ \\rho_ {x} ^ {2} \\right] - \\mathbb {E} [ \\rho_ {x} ] ^ {2}\\right)\\right) ^ {k / 2} (7) \\\\ = \\left(\\left(1 - \\mathbb {E} [ \\rho_ {x} ]\\right) ^ {2} + \\operatorname {V a r} (\\rho_ {x})\\right) ^ {k / 2} (8) \\\\ \\end{array}", + "image_path": "be9b911be9a878ffd4e4573a062e54694ee59dafba8f9f08c927f7135b54b9b9.jpg" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 529, + 191, + 541, + 201 + ], + "blocks": [ + { + "bbox": [ + 529, + 191, + 541, + 201 + ], + "lines": [ + { + "bbox": [ + 529, + 191, + 541, + 201 + ], + "spans": [ + { + "bbox": [ + 529, + 191, + 541, + 201 + ], + "type": "image", + "image_path": "d6563642ee50cd803af3a0e79797353c9f1d469062b359c5cc621ad0702e4063.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 746, + 311, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 746, + 311, + 756 + ], + 
"spans": [ + { + "bbox": [ + 300, + 746, + 311, + 756 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 76, + 168, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 76, + 168, + 92 + ], + "spans": [ + { + "bbox": [ + 69, + 76, + 168, + 92 + ], + "type": "text", + "content": "C RL Theory" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 105, + 147, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 105, + 147, + 118 + ], + "spans": [ + { + "bbox": [ + 69, + 105, + 147, + 118 + ], + "type": "text", + "content": "C.1 Overview" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 127, + 541, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 127, + 541, + 153 + ], + "spans": [ + { + "bbox": [ + 67, + 127, + 541, + 153 + ], + "type": "text", + "content": "We will prove that in a discrete bandit setting with " + }, + { + "bbox": [ + 67, + 127, + 541, + 153 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 67, + 127, + 541, + 153 + ], + "type": "text", + "content": " equally good arms that is the best arm, both REINFORCE and GRPO without KL regularization will eventually collapse into a single-arm strategy." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 157, + 541, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 157, + 541, + 195 + ], + "spans": [ + { + "bbox": [ + 67, + 157, + 541, + 195 + ], + "type": "text", + "content": "We will further prove that, with KL regularization with respect to the initial policy, the converged policy of REINFORCE have the same action distribution as the initial policy when constrained on the set of best arms. Therefore, diversity within good actions will not increase through REINFORCE training." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 205, + 205, + 220 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 205, + 205, + 220 + ], + "spans": [ + { + "bbox": [ + 69, + 205, + 205, + 220 + ], + "type": "text", + "content": "C.2 Notations and Setup" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 228, + 542, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 228, + 542, + 264 + ], + "spans": [ + { + "bbox": [ + 67, + 228, + 542, + 264 + ], + "type": "text", + "content": "Formally we consider the following setting. We consider a " + }, + { + "bbox": [ + 67, + 228, + 542, + 264 + ], + "type": "inline_equation", + "content": "K + 1" + }, + { + "bbox": [ + 67, + 228, + 542, + 264 + ], + "type": "text", + "content": "-armed bandit, with arms " + }, + { + "bbox": [ + 67, + 228, + 542, + 264 + ], + "type": "inline_equation", + "content": "\\{1,2,\\dots ,K + 1\\}" + }, + { + "bbox": [ + 67, + 228, + 542, + 264 + ], + "type": "text", + "content": ". Arms " + }, + { + "bbox": [ + 67, + 228, + 542, + 264 + ], + "type": "inline_equation", + "content": "1,\\ldots ,K" + }, + { + "bbox": [ + 67, + 228, + 542, + 264 + ], + "type": "text", + "content": " are \"good,\" each yielding reward 1, and the other arm is \"bad,\" yielding reward 0. 
We use a softmax parameterization:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 216, + 264, + 392, + 297 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 264, + 392, + 297 + ], + "spans": [ + { + "bbox": [ + 216, + 264, + 392, + 297 + ], + "type": "interline_equation", + "content": "p _ {i} = \\frac {e ^ {\\theta_ {i}}}{\\sum_ {j = 1} ^ {K + 1} e ^ {\\theta_ {j}}}, \\quad i = 1, \\dots , K + 1.", + "image_path": "62c0999f68f4b3f6b5180018a04cacb5187ec510a244fb389d70096c3972f24c.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 300, + 443, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 300, + 443, + 316 + ], + "spans": [ + { + "bbox": [ + 68, + 300, + 443, + 316 + ], + "type": "text", + "content": "to denote the action distribution. We will use " + }, + { + "bbox": [ + 68, + 300, + 443, + 316 + ], + "type": "inline_equation", + "content": "\\theta_i^{(t)}" + }, + { + "bbox": [ + 68, + 300, + 443, + 316 + ], + "type": "text", + "content": " to denote the parameter at step " + }, + { + "bbox": [ + 68, + 300, + 443, + 316 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 68, + 300, + 443, + 316 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 319, + 541, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 319, + 541, + 344 + ], + "spans": [ + { + "bbox": [ + 67, + 319, + 541, + 344 + ], + "type": "text", + "content": "It is standard to consider using the KL divergence between the current policy with a reference policy (which we set as " + }, + { + "bbox": [ + 67, + 319, + 541, + 344 + ], + "type": "inline_equation", + "content": "p_0" + }, + { + "bbox": [ + 67, + 319, + 541, + 344 + ], + "type": "text", + "content": " here) as a regularization term." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 228, + 346, + 380, + 383 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 346, + 380, + 383 + ], + "spans": [ + { + "bbox": [ + 228, + 346, + 380, + 383 + ], + "type": "interline_equation", + "content": "\\mathrm {K L} (p ^ {(t)} | p ^ {(0)}) = \\sum_ {i = 1} ^ {K + 1} p _ {i} ^ {(t)} \\log \\frac {p _ {i} ^ {(t)}}{p _ {i} ^ {(0)}}", + "image_path": "ff377ce696e12e8f4f5eeea526e1b95b06bbe0822fbee618a5e9e16fb377f70c.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 390, + 399, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 390, + 399, + 403 + ], + "spans": [ + { + "bbox": [ + 68, + 390, + 399, + 403 + ], + "type": "text", + "content": "For REINFORCE, we will consider the following training setup. At step " + }, + { + "bbox": [ + 68, + 390, + 399, + 403 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 68, + 390, + 399, + 403 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 92, + 411, + 465, + 442 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 92, + 411, + 465, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 411, + 465, + 429 + ], + "spans": [ + { + "bbox": [ + 92, + 411, + 465, + 429 + ], + "type": "text", + "content": "1. 
We sample an arm " + }, + { + "bbox": [ + 92, + 411, + 465, + 429 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 92, + 411, + 465, + 429 + ], + "type": "text", + "content": " according to " + }, + { + "bbox": [ + 92, + 411, + 465, + 429 + ], + "type": "inline_equation", + "content": "p(\\cdot) = (p_1^{(t)},\\dots ,p_{K + 1}^{(t)})" + }, + { + "bbox": [ + 92, + 411, + 465, + 429 + ], + "type": "text", + "content": " and receive reward " + }, + { + "bbox": [ + 92, + 411, + 465, + 429 + ], + "type": "inline_equation", + "content": "r_t" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 92, + 430, + 254, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 430, + 254, + 442 + ], + "spans": [ + { + "bbox": [ + 92, + 430, + 254, + 442 + ], + "type": "text", + "content": "2. We update using policy gradient." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 140, + 445, + 504, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 445, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 140, + 445, + 504, + 464 + ], + "type": "interline_equation", + "content": "\\theta_ {i} ^ {(t + 1)} = \\theta_ {i} ^ {(t)} + \\eta r _ {t} \\nabla_ {\\theta_ {i}} (\\log p _ {I _ {t}} ^ {(t)}) - \\eta \\beta \\nabla_ {\\theta_ {i}} \\mathrm {K L} (p ^ {(t)} | p ^ {(0)}), i = 1, \\dots , K + 1,", + "image_path": "3f01e7c236fd71d192033777739815a43af7536fe7a6e1121aeed7d48f33e627.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 464, + 541, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 541, + 489 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 541, + 489 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 464, + 541, + 489 + ], + "type": "inline_equation", + "content": "\\eta > 0" + }, + { + "bbox": [ + 104, + 464, + 541, + 489 + ], + "type": "text", + "content": " is the step size 
and " + }, + { + "bbox": [ + 104, + 464, + 541, + 489 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 464, + 541, + 489 + ], + "type": "text", + "content": " is the hyperparameter controlling the strength of KL regularization." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 67, + 498, + 541, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 498, + 541, + 523 + ], + "spans": [ + { + "bbox": [ + 67, + 498, + 541, + 523 + ], + "type": "text", + "content": "For GRPO, we will consider the following simplified training setup. This is equivalent to the empirical version of GRPO with online sampling." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 92, + 530, + 512, + 562 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 92, + 530, + 512, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 530, + 512, + 548 + ], + "spans": [ + { + "bbox": [ + 92, + 530, + 512, + 548 + ], + "type": "text", + "content": "1. Sample " + }, + { + "bbox": [ + 92, + 530, + 512, + 548 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 92, + 530, + 512, + 548 + ], + "type": "text", + "content": " arms " + }, + { + "bbox": [ + 92, + 530, + 512, + 548 + ], + "type": "inline_equation", + "content": "\\{I_t^{(1)},\\dots ,I_t^{(G)}\\}" + }, + { + "bbox": [ + 92, + 530, + 512, + 548 + ], + "type": "text", + "content": " i.i.d. from the current policy " + }, + { + "bbox": [ + 92, + 530, + 512, + 548 + ], + "type": "inline_equation", + "content": "p(\\cdot)" + }, + { + "bbox": [ + 92, + 530, + 512, + 548 + ], + "type": "text", + "content": " and receive rewards " + }, + { + "bbox": [ + 92, + 530, + 512, + 548 + ], + "type": "inline_equation", + "content": "r_t^{(g)}" + }, + { + "bbox": [ + 92, + 530, + 512, + 548 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 93, + 549, + 149, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 549, + 149, + 562 + ], + "spans": [ + { + "bbox": [ + 93, + 549, + 149, + 562 + ], + "type": "text", + "content": "2. Compute" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 211, + 563, + 433, + 599 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 563, + 433, + 599 + ], + "spans": [ + { + "bbox": [ + 211, + 563, + 433, + 599 + ], + "type": "interline_equation", + "content": "\\mu_ {t} = \\frac {1}{G} \\sum_ {g = 1} ^ {G} r _ {t} ^ {(g)}, \\quad \\sigma_ {t} = \\sqrt {\\frac {1}{G} \\sum_ {g = 1} ^ {G} \\left(r _ {t} ^ {(g)} - \\mu_ {t}\\right) ^ {2}},", + "image_path": "db21166a21e933cccd81e5480a94c5617a43c7911409efb16d9d732815acbe3d.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 600, + 272, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 600, + 272, + 613 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 272, + 613 + ], + "type": "text", + "content": "and define the normalized advantage" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 252, + 616, + 392, + 661 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 616, + 392, + 661 + ], + "spans": [ + { + "bbox": [ + 252, + 616, + 392, + 661 + ], + "type": "interline_equation", + "content": "\\begin{array}{r} \\tilde {r} _ {t} ^ {(g)} = \\left\\{ \\begin{array}{l l} \\frac {r _ {t} ^ {(g)} - \\mu_ {t}}{\\sigma_ {t}}, & \\sigma_ {t} \\neq 0, \\\\ 0, & \\sigma_ {t} = 0. \\end{array} \\right. 
\\end{array}", + "image_path": "755487d49278fc8f427c89062e4764a3dbed733574cc63f87205903fb44ea7e0.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 662, + 257, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 662, + 257, + 676 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 257, + 676 + ], + "type": "text", + "content": "We will skip the update if " + }, + { + "bbox": [ + 104, + 662, + 257, + 676 + ], + "type": "inline_equation", + "content": "\\sigma_t = 0" + }, + { + "bbox": [ + 104, + 662, + 257, + 676 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 92, + 678, + 239, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 678, + 239, + 691 + ], + "spans": [ + { + "bbox": [ + 92, + 678, + 239, + 691 + ], + "type": "text", + "content": "3. Update each parameter " + }, + { + "bbox": [ + 92, + 678, + 239, + 691 + ], + "type": "inline_equation", + "content": "\\theta_{i}" + }, + { + "bbox": [ + 92, + 678, + 239, + 691 + ], + "type": "text", + "content": " via" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 135, + 695, + 509, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 695, + 509, + 730 + ], + "spans": [ + { + "bbox": [ + 135, + 695, + 509, + 730 + ], + "type": "interline_equation", + "content": "\\theta_ {i} \\gets \\theta_ {i} + \\frac {\\eta}{G} \\sum_ {g = 1} ^ {G} \\widehat {r} _ {t} ^ {(g)} \\nabla_ {\\theta_ {i}} (\\log p _ {I _ {t} ^ {(g)}} ^ {(t)}) - \\eta \\beta \\nabla_ {\\theta_ {i}} \\mathrm {K L} (p ^ {(t)} | p ^ {(0)}). 
i = 1, \\ldots , K + 1,", + "image_path": "76912c1563647074f02f5a01f22b64a0f1b55b9425bc2f039804f293c31af747.jpg" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 746, + 310, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 746, + 310, + 756 + ], + "spans": [ + { + "bbox": [ + 300, + 746, + 310, + 756 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 76, + 387, + 91 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 76, + 387, + 91 + ], + "spans": [ + { + "bbox": [ + 68, + 76, + 387, + 91 + ], + "type": "text", + "content": "C.3 Implicit Diversity Collapse without KL regularization" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "spans": [ + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "text", + "content": "Theorem C.1 (Collapse to Deterministic Policy). 
Under REINFORCE or GRPO updates without KL regularization " + }, + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "inline_equation", + "content": "(\\beta_0 = 0)" + }, + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "text", + "content": ", given a sufficient small " + }, + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "text", + "content": ", with probability 1:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 253, + 130, + 355, + 156 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 130, + 355, + 156 + ], + "spans": [ + { + "bbox": [ + 253, + 130, + 355, + 156 + ], + "type": "interline_equation", + "content": "\\limsup_{t\\to \\infty}\\max_{i\\in [K]}p_{i}^{(t)} = 1.", + "image_path": "025400c251313c19885eeda9c411127b9f8264a71c6d8cea574e0029094f187b.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 159, + 358, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 159, + 358, + 174 + ], + "spans": [ + { + "bbox": [ + 69, + 159, + 358, + 174 + ], + "type": "text", + "content": "Thus, the policy collapses to a single-arm strategy during training." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 185, + 199, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 185, + 199, + 199 + ], + "spans": [ + { + "bbox": [ + 69, + 185, + 199, + 199 + ], + "type": "text", + "content": "Proof. The proof is two-fold." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 203, + 414, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 203, + 414, + 217 + ], + "spans": [ + { + "bbox": [ + 68, + 203, + 414, + 217 + ], + "type": "text", + "content": "Using Lemma C.3 and C.4, we can show that bad arm probability diminishes," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 269, + 222, + 341, + 244 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 222, + 341, + 244 + ], + "spans": [ + { + "bbox": [ + 269, + 222, + 341, + 244 + ], + "type": "interline_equation", + "content": "\\lim _ {t \\to \\infty} p _ {K + 1} ^ {(t)} = 0", + "image_path": "0d716e431c635ba9b4ddf9553c1ba062384da0054901a2617578c183216748d1.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 255, + 355, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 255, + 355, + 269 + ], + "spans": [ + { + "bbox": [ + 68, + 255, + 355, + 269 + ], + "type": "text", + "content": "We will then define a property named Self-enforcing Stochastic" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 273, + 542, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 273, + 542, + 300 + ], + "spans": [ + { + "bbox": [ + 68, + 273, + 542, + 300 + ], + "type": "text", + "content": "Definition C.2 (Self-enforcing Stochastic Policy Update Rule). We define three properties of policy update rule that will lead to diversity collapse" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 93, + 311, + 542, + 456 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "spans": [ + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "text", + "content": "1. 
The policy update takes the form of " + }, + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "inline_equation", + "content": "\\sum_{k=1}^{B} A_k \\nabla \\log p_i(\\theta_{i_k})" + }, + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "inline_equation", + "content": "i_k" + }, + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "text", + "content": "-th sampled arm in the batch and " + }, + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "inline_equation", + "content": "A_k" + }, + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "text", + "content": " is a function determined by (i) the sum of reward " + }, + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{K} r_{i_k}" + }, + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "text", + "content": " with in the batch; (ii) the reward " + }, + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "inline_equation", + "content": "r_{i_k}" + }, + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "text", + "content": " and (iii) the batch size " + }, + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 93, + 311, + 542, + 352 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "spans": [ + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "text", + "content": "2. 
A policy update rule is said to be self-enforcing, if " + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\theta_i^{(t + 1)} - \\theta_i^{(t)}]" + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "text", + "content": " is monotonous with " + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "inline_equation", + "content": "\\theta_{i}^{(t)}" + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "inline_equation", + "content": "i\\in [K]" + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "text", + "content": ". Further " + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\theta_i^{(t + 1)} - \\theta_i^{(t)}]" + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "text", + "content": " is non-positive if " + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "inline_equation", + "content": "i\\geq K + 1" + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "text", + "content": " and is non-negative if " + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "inline_equation", + "content": "i\\leq K" + }, + { + "bbox": [ + 93, + 358, + 542, + 392 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "spans": [ + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "type": "text", + "content": "3. 
A policy update rule is said to be self-enforcing stochastic if it is self-enforcing and there exists constants " + }, + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "type": "inline_equation", + "content": "C_1, C_2 > 0" + }, + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "type": "text", + "content": " such that for any " + }, + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "type": "inline_equation", + "content": "\\epsilon > 0" + }, + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "type": "text", + "content": ", whenever the current policy satisfies " + }, + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "type": "inline_equation", + "content": "\\max_{i \\in [K]} p_i^{(t)} \\in [1/2K, 1 - \\epsilon]" + }, + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "type": "text", + "content": " (i.e., no single good arm dominates), for " + }, + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "type": "inline_equation", + "content": "i^* = \\arg \\max_{i \\in [K]} p_i^{(t)}" + }, + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "type": "text", + "content": " the conditional second moment of the parameter updates for every arm " + }, + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "type": "inline_equation", + "content": "i \\in [K + 1]" + }, + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "type": "inline_equation", + "content": "i \\neq i^*" + }, + { + "bbox": [ + 93, + 397, + 542, + 456 + ], + "type": "text", + "content": " satisfies:" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 196, + 461, + 448, + 489 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 461, + 448, + 489 + ], + "spans": [ + { + "bbox": [ + 196, + 461, + 448, + 489 + ], + "type": "interline_equation", + "content": "\\mathbb {E} \\left[ \\left(\\left(\\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)}\\right) - \\left(\\theta_ {i ^ {*}} ^ {(t + 
1)} - \\theta_ {i ^ {*}} ^ {(t)}\\right)\\right) ^ {2} \\mid \\theta^ {(t)} \\right] \\geq C _ {1} \\epsilon^ {2}.", + "image_path": "b0ed055ba002df5032ea09faa74193e795a62153a65d9469f3715cc3b807f7ff.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 494, + 126, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 494, + 126, + 505 + ], + "spans": [ + { + "bbox": [ + 105, + 494, + 126, + 505 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 277, + 502, + 369, + 520 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 277, + 502, + 369, + 520 + ], + "spans": [ + { + "bbox": [ + 277, + 502, + 369, + 520 + ], + "type": "interline_equation", + "content": "| \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} | < C _ {2}", + "image_path": "66ac2568e0176bd956a7f1d1f5e7aed9e9fba0733ce10c464bbf875e9c49fbad.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 68, + 530, + 540, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 530, + 540, + 555 + ], + "spans": [ + { + "bbox": [ + 68, + 530, + 540, + 555 + ], + "type": "text", + "content": "Lemma C.5 shows that for any self-enforcing stochastic policy update rule, the final policy collapses into a single-arm policy." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 68, + 559, + 542, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 559, + 542, + 586 + ], + "spans": [ + { + "bbox": [ + 68, + 559, + 542, + 586 + ], + "type": "text", + "content": "Using Lemma C.6 and C.7, we can show that REINFORCE and GRPO are self-enforcing stochastic policy update rules when bad arm probability is lower than " + }, + { + "bbox": [ + 68, + 559, + 542, + 586 + ], + "type": "inline_equation", + "content": "1 / 2" + }, + { + "bbox": [ + 68, + 559, + 542, + 586 + ], + "type": "text", + "content": ". The proof is then complete." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 68, + 591, + 542, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 591, + 542, + 622 + ], + "spans": [ + { + "bbox": [ + 68, + 591, + 542, + 622 + ], + "type": "text", + "content": "Lemma C.3 (Bad Arm Probability Diminishes Using REINFORCE). Under the REINFORCE algorithm without KL regularization " + }, + { + "bbox": [ + 68, + 591, + 542, + 622 + ], + "type": "inline_equation", + "content": "(\\beta = 0)" + }, + { + "bbox": [ + 68, + 591, + 542, + 622 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 591, + 542, + 622 + ], + "type": "inline_equation", + "content": "\\lim_{t\\to \\infty}p_{K + 1}^{(t)} = 0" + }, + { + "bbox": [ + 68, + 591, + 542, + 622 + ], + "type": "text", + "content": " almost surely." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 632, + 335, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 632, + 335, + 645 + ], + "spans": [ + { + "bbox": [ + 69, + 632, + 335, + 645 + ], + "type": "text", + "content": "Proof. 
We can first simplify the REINFORCE update rule to" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 176, + 651, + 433, + 669 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 651, + 433, + 669 + ], + "spans": [ + { + "bbox": [ + 176, + 651, + 433, + 669 + ], + "type": "interline_equation", + "content": "\\theta_ {i} ^ {(t + 1)} = \\theta_ {i} ^ {(t)} + \\eta r _ {t} (\\mathbf {1} (I _ {t} = i) - p _ {i} ^ {(t)}), \\quad i = 1, \\dots , K + 1.", + "image_path": "5389864675d1dd6b4715e8cae0d0e3a2a961b2c51f11fab71e30b0988c041768.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 68, + 681, + 332, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 681, + 332, + 697 + ], + "spans": [ + { + "bbox": [ + 68, + 681, + 332, + 697 + ], + "type": "text", + "content": "Noted that " + }, + { + "bbox": [ + 68, + 681, + 332, + 697 + ], + "type": "inline_equation", + "content": "\\sum_{i}\\theta_{i}^{(t)}" + }, + { + "bbox": [ + 68, + 681, + 332, + 697 + ], + "type": "text", + "content": " will not change with " + }, + { + "bbox": [ + 68, + 681, + 332, + 697 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 68, + 681, + 332, + 697 + ], + "type": "text", + "content": ", WLOG, assume" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 276, + 703, + 334, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 703, + 334, + 730 + ], + "spans": [ + { + "bbox": [ + 276, + 703, + 334, + 730 + ], + "type": "interline_equation", + "content": "\\sum_ {i} \\theta_ {i} ^ {(t)} = 0.", + "image_path": "b944cf1cc45211aee3e8d25a15fec6ac4fa6994c7df46fbac79b006f97f67b7e.jpg" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "type": "text", + 
"content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 747, + 310, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 747, + 310, + 756 + ], + "spans": [ + { + "bbox": [ + 300, + 747, + 310, + 756 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 76, + 444, + 90 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 76, + 444, + 90 + ], + "spans": [ + { + "bbox": [ + 68, + 76, + 444, + 90 + ], + "type": "text", + "content": "Because " + }, + { + "bbox": [ + 68, + 76, + 444, + 90 + ], + "type": "inline_equation", + "content": "r_{K + 1} = 0" + }, + { + "bbox": [ + 68, + 76, + 444, + 90 + ], + "type": "text", + "content": ", we can then assume without loss of generality, for all " + }, + { + "bbox": [ + 68, + 76, + 444, + 90 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 68, + 76, + 444, + 90 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 76, + 444, + 90 + ], + "type": "inline_equation", + "content": "I_t \\leq K" + }, + { + "bbox": [ + 68, + 76, + 444, + 90 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 95, + 176, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 95, + 176, + 107 + ], + "spans": [ + { + "bbox": [ + 69, + 95, + 176, + 107 + ], + "type": "text", + "content": "This then suggests that" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 248, + 108, + 361, + 127 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 248, + 108, + 361, + 127 + ], + "spans": [ + { + "bbox": [ + 248, + 108, + 361, + 127 + ], + "type": "interline_equation", + "content": "\\theta_ {K + 1} ^ {(t + 1)} = \\theta_ {K + 1} ^ {(t)} - \\eta p _ {K + 1} ^ {(t)}", + "image_path": "08617bf65e4ebad736f26039f5ce3bc40693f92d1f1c5988d045480873e04806.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 131, + 180, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 131, + 180, + 144 + ], + "spans": [ + { + "bbox": [ + 68, + 131, + 180, + 144 + ], + "type": "text", + "content": "monotonically decrease." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "spans": [ + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "text", + "content": "For any " + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "text", + "content": ", if " + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "inline_equation", + "content": "p_{K + 1}^{(t)} > \\epsilon" + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "text", + "content": " holds for infinite " + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "text", + "content": ", then there exists " + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "inline_equation", + "content": "t_0" + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "inline_equation", + "content": "\\theta_{K + 1}^t < \\log \\epsilon" + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "inline_equation", + "content": "t > t_0" + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "text", + "content": ". 
For any " + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "inline_equation", + "content": "t > t_0" + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "text", + "content": ", there exists " + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "inline_equation", + "content": "i \\in [K]" + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "text", + "content": ", such that " + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "inline_equation", + "content": "\\theta_i^{(t)} > 0" + }, + { + "bbox": [ + 68, + 150, + 542, + 183 + ], + "type": "text", + "content": ". This then suggests that" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 232, + 189, + 376, + 208 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 189, + 376, + 208 + ], + "spans": [ + { + "bbox": [ + 232, + 189, + 376, + 208 + ], + "type": "interline_equation", + "content": "p _ {K + 1} ^ {(t)} \\leq \\exp (\\theta_ {K + 1} ^ {(t)} - \\theta_ {i} ^ {(t)}) \\leq \\epsilon .", + "image_path": "a5aea18089ca053347f8eb61bd79c4fcdbe3f455df74855d569db5b6dfaee20b.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 213, + 327, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 213, + 327, + 227 + ], + "spans": [ + { + "bbox": [ + 68, + 213, + 327, + 227 + ], + "type": "text", + "content": "This leads to a contradiction. The proof is then complete." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 238, + 541, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 238, + 541, + 268 + ], + "spans": [ + { + "bbox": [ + 68, + 238, + 541, + 268 + ], + "type": "text", + "content": "Lemma C.4 (Bad Arm Probability Diminishes Using GRPO). 
Under the GRPO algorithm without KL regularization " + }, + { + "bbox": [ + 68, + 238, + 541, + 268 + ], + "type": "inline_equation", + "content": "(\\beta = 0), \\lim_{t \\to \\infty} p_{K+1}^{(t)} = 0" + }, + { + "bbox": [ + 68, + 238, + 541, + 268 + ], + "type": "text", + "content": " almost surely." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 285, + 540, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 285, + 540, + 314 + ], + "spans": [ + { + "bbox": [ + 68, + 285, + 540, + 314 + ], + "type": "text", + "content": "Proof. For GRPO, we can show that " + }, + { + "bbox": [ + 68, + 285, + 540, + 314 + ], + "type": "inline_equation", + "content": "\\tilde{r}_t^{(g)}" + }, + { + "bbox": [ + 68, + 285, + 540, + 314 + ], + "type": "text", + "content": " is negative iff " + }, + { + "bbox": [ + 68, + 285, + 540, + 314 + ], + "type": "inline_equation", + "content": "I_t^{(g)} = K + 1" + }, + { + "bbox": [ + 68, + 285, + 540, + 314 + ], + "type": "text", + "content": ". Therefore, we can show that " + }, + { + "bbox": [ + 68, + 285, + 540, + 314 + ], + "type": "inline_equation", + "content": "\\theta_{K+1}^{(t)}" + }, + { + "bbox": [ + 68, + 285, + 540, + 314 + ], + "type": "text", + "content": " monotonically decreases, similar to the case in REINFORCE." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 319, + 541, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 319, + 541, + 365 + ], + "spans": [ + { + "bbox": [ + 68, + 319, + 541, + 365 + ], + "type": "text", + "content": "If " + }, + { + "bbox": [ + 68, + 319, + 541, + 365 + ], + "type": "inline_equation", + "content": "p_{K+1}^{(t)} > \\epsilon" + }, + { + "bbox": [ + 68, + 319, + 541, + 365 + ], + "type": "text", + "content": " holds for some " + }, + { + "bbox": [ + 68, + 319, + 541, + 365 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 68, + 319, + 541, + 365 + ], + "type": "text", + "content": ", one can prove that " + }, + { + "bbox": [ + 68, + 319, + 541, + 365 + ], + "type": "inline_equation", + "content": "\\theta_{K+1}^{(t)}" + }, + { + "bbox": [ + 68, + 319, + 541, + 365 + ], + "type": "text", + "content": " will decrease by a constant depending on " + }, + { + "bbox": [ + 68, + 319, + 541, + 365 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 68, + 319, + 541, + 365 + ], + "type": "text", + "content": " in expectation. Therefore, following the same line as in C.3, we can prove that " + }, + { + "bbox": [ + 68, + 319, + 541, + 365 + ], + "type": "inline_equation", + "content": "\\lim_{t \\to \\infty} p_{K+1}^{(t)} = 0" + }, + { + "bbox": [ + 68, + 319, + 541, + 365 + ], + "type": "text", + "content": " almost surely." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 374, + 542, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 374, + 542, + 415 + ], + "spans": [ + { + "bbox": [ + 68, + 374, + 542, + 415 + ], + "type": "text", + "content": "Lemma C.5 (Collapse Happens for All Self-enforcing Stochastic Policy Update Rule). 
Consider a policy update process that is self-enforcing stochastic (Definition C.2), then " + }, + { + "bbox": [ + 68, + 374, + 542, + 415 + ], + "type": "inline_equation", + "content": "\\lim \\sup_{t\\to \\infty}\\max_{i\\in [K]}p_i^{(t)} = 1" + }, + { + "bbox": [ + 68, + 374, + 542, + 415 + ], + "type": "text", + "content": " almost surely." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 432, + 542, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 432, + 542, + 458 + ], + "spans": [ + { + "bbox": [ + 68, + 432, + 542, + 458 + ], + "type": "text", + "content": "Proof. We will inductively prove that for different " + }, + { + "bbox": [ + 68, + 432, + 542, + 458 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 68, + 432, + 542, + 458 + ], + "type": "text", + "content": " the following induction hypotheses, for any " + }, + { + "bbox": [ + 68, + 432, + 542, + 458 + ], + "type": "inline_equation", + "content": "\\epsilon, \\delta > 0" + }, + { + "bbox": [ + 68, + 432, + 542, + 458 + ], + "type": "text", + "content": ", there exists " + }, + { + "bbox": [ + 68, + 432, + 542, + 458 + ], + "type": "inline_equation", + "content": "T_{\\epsilon, \\delta, K} > 0" + }, + { + "bbox": [ + 68, + 432, + 542, + 458 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 226, + 479, + 383, + 503 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 479, + 383, + 503 + ], + "spans": [ + { + "bbox": [ + 226, + 479, + 383, + 503 + ], + "type": "interline_equation", + "content": "\\Pr \\left(\\max _ {t < T _ {\\epsilon , \\delta , K}} \\max _ {i \\in [ K ]} p _ {i} ^ {(t)} < 1 - \\epsilon\\right) < \\delta .", + "image_path": "0c4250ef61690cb6f1d8c4915cae968117a343314b9954e0c0942cc5689c68ec.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 68, + 516, + 251, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 
516, + 251, + 529 + ], + "spans": [ + { + "bbox": [ + 68, + 516, + 251, + 529 + ], + "type": "text", + "content": "We first consider the case where " + }, + { + "bbox": [ + 68, + 516, + 251, + 529 + ], + "type": "inline_equation", + "content": "K = 2" + }, + { + "bbox": [ + 68, + 516, + 251, + 529 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 68, + 534, + 196, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 534, + 196, + 547 + ], + "spans": [ + { + "bbox": [ + 68, + 534, + 196, + 547 + ], + "type": "text", + "content": "Consider the stopping time," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 235, + 555, + 375, + 579 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 555, + 375, + 579 + ], + "spans": [ + { + "bbox": [ + 235, + 555, + 375, + 579 + ], + "type": "interline_equation", + "content": "\\tau_ {\\epsilon} = \\arg \\min _ {t} \\max _ {i \\in [ K ]} p _ {i} ^ {(t)} > 1 - \\epsilon", + "image_path": "fbd260da70ae9fabe464358e99f7e77b4fd9b2472d84f1bf4de3ae9e3d068a3a.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 68, + 592, + 333, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 592, + 333, + 608 + ], + "spans": [ + { + "bbox": [ + 68, + 592, + 333, + 608 + ], + "type": "text", + "content": "For any " + }, + { + "bbox": [ + 68, + 592, + 333, + 608 + ], + "type": "inline_equation", + "content": "\\mathcal{I} = \\{1,2\\}" + }, + { + "bbox": [ + 68, + 592, + 333, + 608 + ], + "type": "text", + "content": ", define " + }, + { + "bbox": [ + 68, + 592, + 333, + 608 + ], + "type": "inline_equation", + "content": "\\Delta_{\\mathcal{I}}^{t} = \\max_{j\\in [K]}\\theta_{j}^{t} - \\min_{j\\in \\mathcal{I}}\\theta_{i}^{t}" + }, + { + "bbox": [ + 68, + 592, + 333, + 608 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 68, + 613, + 541, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 613, + 541, + 642 + ], + "spans": [ + { + "bbox": [ + 68, + 613, + 541, + 642 + ], + "type": "text", + "content": "Assume " + }, + { + "bbox": [ + 68, + 613, + 541, + 642 + ], + "type": "inline_equation", + "content": "\\theta_{i*}^t = \\max_{j\\in [K]}\\theta_j^t" + }, + { + "bbox": [ + 68, + 613, + 541, + 642 + ], + "type": "text", + "content": ", because " + }, + { + "bbox": [ + 68, + 613, + 541, + 642 + ], + "type": "inline_equation", + "content": "|\\mathcal{I}|\\geq 2" + }, + { + "bbox": [ + 68, + 613, + 541, + 642 + ], + "type": "text", + "content": ", there exists " + }, + { + "bbox": [ + 68, + 613, + 541, + 642 + ], + "type": "inline_equation", + "content": "i\\neq i^{*}" + }, + { + "bbox": [ + 68, + 613, + 541, + 642 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 613, + 541, + 642 + ], + "type": "inline_equation", + "content": "\\min_{j\\in \\mathcal{I}}\\theta_i^t >0" + }, + { + "bbox": [ + 68, + 613, + 541, + 642 + ], + "type": "text", + "content": ". 
We will show three properties of " + }, + { + "bbox": [ + 68, + 613, + 541, + 642 + ], + "type": "inline_equation", + "content": "\\Delta_I^t" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 68, + 647, + 458, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 647, + 458, + 663 + ], + "spans": [ + { + "bbox": [ + 68, + 647, + 458, + 663 + ], + "type": "text", + "content": "First " + }, + { + "bbox": [ + 68, + 647, + 458, + 663 + ], + "type": "inline_equation", + "content": "\\Delta_{\\mathcal{I}}^{(t)}" + }, + { + "bbox": [ + 68, + 647, + 458, + 663 + ], + "type": "text", + "content": " is a submartingale defined on the filtration of the distribution of " + }, + { + "bbox": [ + 68, + 647, + 458, + 663 + ], + "type": "inline_equation", + "content": "\\theta^{(t)}" + }, + { + "bbox": [ + 68, + 647, + 458, + 663 + ], + "type": "text", + "content": " because" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 162, + 685, + 447, + 703 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 685, + 447, + 703 + ], + "spans": [ + { + "bbox": [ + 162, + 685, + 447, + 703 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\Delta_ {\\mathcal {I}} ^ {(t)} | \\theta_ {t} ] - \\Delta_ {\\mathcal {I}} ^ {(t - 1)} > \\mathbb {E} [ (\\theta_ {i ^ {*}} ^ {t + 1} - \\theta_ {i ^ {*}} ^ {t}) - (\\theta_ {i} ^ {t + 1} - \\theta_ {i} ^ {t}) | \\theta_ {t} ] > 0.", + "image_path": "8ce829c977aacce11e66df276705eb378c12107b1a887e9b736fc98c415e6ed5.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 68, + 715, + 205, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 715, + 205, + 728 + ], + "spans": [ + { + "bbox": [ + 68, + 715, + 205, + 728 + ], + "type": "text", + "content": "as the policy is self-enforcing." 
+ } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 746, + 310, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 746, + 310, + 756 + ], + "spans": [ + { + "bbox": [ + 300, + 746, + 310, + 756 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 75, + 266, + 92 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 75, + 266, + 92 + ], + "spans": [ + { + "bbox": [ + 68, + 75, + 266, + 92 + ], + "type": "text", + "content": "Further " + }, + { + "bbox": [ + 68, + 75, + 266, + 92 + ], + "type": "inline_equation", + "content": "\\Delta_{\\mathcal{I}}^{(t)}" + }, + { + "bbox": [ + 68, + 75, + 266, + 92 + ], + "type": "text", + "content": " has bounded growth of " + }, + { + "bbox": [ + 68, + 75, + 266, + 92 + ], + "type": "inline_equation", + "content": "2C_2" + }, + { + "bbox": [ + 68, + 75, + 266, + 92 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 241, + 100, + 369, + 144 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 100, + 369, + 144 + ], + "spans": [ + { + "bbox": [ + 241, + 100, + 369, + 144 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} | \\max _ {j \\in [ K ]} \\theta_ {j} ^ {t + 1} - \\max _ {j \\in [ K ]} \\theta_ {j} ^ {t} | < C _ {2}. \\\\ \\bigl|\\min_{j\\in \\mathcal{I}}\\theta_{j}^{t + 1} - \\max_{j\\in \\mathcal{I}}\\theta_{j}^{t}\\bigr| < C_{2}. 
\\\\ \\end{array}", + "image_path": "61852e157d7ade30f6e2c0703696bface8d53ae9ba6a912c736f52927c4780a8.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 176, + 508, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 176, + 508, + 192 + ], + "spans": [ + { + "bbox": [ + 68, + 176, + 508, + 192 + ], + "type": "text", + "content": "Furthermore, the second-momentum of " + }, + { + "bbox": [ + 68, + 176, + 508, + 192 + ], + "type": "inline_equation", + "content": "\\Delta_{\\mathcal{I}}^{(t)}" + }, + { + "bbox": [ + 68, + 176, + 508, + 192 + ], + "type": "text", + "content": " needs to increase with " + }, + { + "bbox": [ + 68, + 176, + 508, + 192 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 68, + 176, + 508, + 192 + ], + "type": "text", + "content": " by a constant for any " + }, + { + "bbox": [ + 68, + 176, + 508, + 192 + ], + "type": "inline_equation", + "content": "t < \\tau_{\\epsilon}" + }, + { + "bbox": [ + 68, + 176, + 508, + 192 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 182, + 200, + 427, + 235 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 200, + 427, + 235 + ], + "spans": [ + { + "bbox": [ + 182, + 200, + 427, + 235 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} \\left[ \\left(\\Delta_ {\\mathcal {I}} ^ {(t + 1)}\\right) ^ {2} \\mid \\theta_ {t} \\right] \\geq \\left(\\Delta_ {\\mathcal {I}} ^ {(t)}\\right) ^ {2} + \\mathbb {E} \\left[ \\left(\\Delta_ {\\mathcal {I}} ^ {(t + 1)} - \\Delta_ {\\mathcal {I}} ^ {(t)}\\right)\\right) ^ {2} \\mid \\theta_ {t} ] \\\\ \\geq \\left(\\Delta_ {I} ^ {(t)}\\right) ^ {2} + C _ {1} \\epsilon^ {2}. 
\\\\ \\end{array}", + "image_path": "2ba83d43ee06d46a52d1c0a7af9c34e2f33d4173d7a4793a62c64f013efa1d66.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 251, + 378, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 251, + 378, + 267 + ], + "spans": [ + { + "bbox": [ + 68, + 251, + 378, + 267 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 68, + 251, + 378, + 267 + ], + "type": "inline_equation", + "content": "t < \\tau_{\\epsilon}" + }, + { + "bbox": [ + 68, + 251, + 378, + 267 + ], + "type": "text", + "content": ", it holds that " + }, + { + "bbox": [ + 68, + 251, + 378, + 267 + ], + "type": "inline_equation", + "content": "\\Delta_{\\mathcal{I}}^{(t)} < \\log \\frac{2}{\\epsilon}" + }, + { + "bbox": [ + 68, + 251, + 378, + 267 + ], + "type": "text", + "content": ", otherwise we can prove that" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 165, + 274, + 444, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 274, + 444, + 300 + ], + "spans": [ + { + "bbox": [ + 165, + 274, + 444, + 300 + ], + "type": "interline_equation", + "content": "\\max _ {i, j \\in \\{1, 2 \\}} p _ {i} / p _ {j} = \\exp (\\Delta_ {\\mathcal {I}} ^ {(t)}) > \\frac {2 - 2 \\epsilon}{\\epsilon}. \\Rightarrow \\max _ {i \\in \\{1, 2 \\}} p _ {i} > 1 - \\epsilon .", + "image_path": "3a918ee88871d88fcb837e4186f8c5168750ffbf4b0c2ac0d7564ec6d2f81fd3.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 308, + 387, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 308, + 387, + 321 + ], + "spans": [ + { + "bbox": [ + 69, + 308, + 387, + 321 + ], + "type": "text", + "content": "This is a contradiction. 
Further, by Martingale inequality, we have that" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 190, + 328, + 420, + 350 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 328, + 420, + 350 + ], + "spans": [ + { + "bbox": [ + 190, + 328, + 420, + 350 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\left(\\Delta^ {\\min \\{t, \\tau_ {\\epsilon} \\}}\\right) ^ {2} ] > \\mathbb {E} [ \\left(\\Delta^ {0}\\right) ^ {2} ] + C _ {1} \\epsilon^ {2} \\mathbb {E} [ \\min \\{t, \\tau_ {\\epsilon} \\} ]", + "image_path": "d0b7ba9d3fe9a3b515301116f9548cf159ed54d41ee527845462fc6abdd2b25e.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 364, + 292, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 364, + 292, + 377 + ], + "spans": [ + { + "bbox": [ + 69, + 364, + 292, + 377 + ], + "type": "text", + "content": "Further, as " + }, + { + "bbox": [ + 69, + 364, + 292, + 377 + ], + "type": "inline_equation", + "content": "\\Delta^t" + }, + { + "bbox": [ + 69, + 364, + 292, + 377 + ], + "type": "text", + "content": " has bounded growth, we have that" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 224, + 384, + 386, + 409 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 224, + 384, + 386, + 409 + ], + "spans": [ + { + "bbox": [ + 224, + 384, + 386, + 409 + ], + "type": "interline_equation", + "content": "\\mathbb {E} \\left[ \\left(\\Delta^ {\\min \\{t, \\tau_ {\\epsilon} \\}}\\right) ^ {2} \\right] < (\\log \\frac {2}{\\epsilon} + 2 C _ {2}) ^ {2}.", + "image_path": "d44e7811d73135bd84f5bafc86582a0b9eb0cab2b18727f87186e109f0181cc6.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 422, + 349, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 422, + 349, + 443 + ], + "spans": [ + { + "bbox": [ + 68, + 422, + 349, + 443 + ], + "type": "text", + "content": "This implies " + }, + { + "bbox": [ + 68, + 
422, + 349, + 443 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\min \\{t,\\tau_{\\epsilon}\\}] < \\frac{(\\log\\frac{2}{\\epsilon} + 2C_2)^2}{C_1\\epsilon^2}" + }, + { + "bbox": [ + 68, + 422, + 349, + 443 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 68, + 422, + 349, + 443 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 68, + 422, + 349, + 443 + ], + "type": "text", + "content": ", this implies" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 250, + 450, + 360, + 479 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 450, + 360, + 479 + ], + "spans": [ + { + "bbox": [ + 250, + 450, + 360, + 479 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\tau_ {\\epsilon} ] < \\frac {(\\log \\frac {2}{\\epsilon} + 2 C _ {2}) ^ {2}}{C _ {1} \\epsilon^ {2}}.", + "image_path": "617a18e5724e8faa2af11981ddb7e906d78aa4383d6f8ef809a5eba43aade27f.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 491, + 266, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 491, + 266, + 505 + ], + "spans": [ + { + "bbox": [ + 69, + 491, + 266, + 505 + ], + "type": "text", + "content": "Further, by Markov inequality, if we choose" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 250, + 512, + 361, + 540 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 512, + 361, + 540 + ], + "spans": [ + { + "bbox": [ + 250, + 512, + 361, + 540 + ], + "type": "interline_equation", + "content": "T _ {\\epsilon , \\delta , 2} = \\frac {(\\log \\frac {2}{\\epsilon} + 2 C _ {2}) ^ {2}}{C _ {1} \\epsilon^ {2} \\delta}.", + "image_path": "75219cfb4f883bfd007fe962ddcd413837dbf356a779a36c396e3403fd88d124.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 548, + 94, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 548, + 94, + 559 + ], + "spans": [ + { + "bbox": [ + 
69, + 548, + 94, + 559 + ], + "type": "text", + "content": "then," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 240, + 566, + 370, + 594 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 566, + 370, + 594 + ], + "spans": [ + { + "bbox": [ + 240, + 566, + 370, + 594 + ], + "type": "interline_equation", + "content": "\\Pr \\left(\\tau_ {\\epsilon} > T _ {\\epsilon , \\delta , 2}\\right) < \\frac {\\mathbb {E} \\left[ \\tau_ {\\epsilon} \\right]}{T _ {\\epsilon , \\delta , 2}} < \\delta .", + "image_path": "72f39f593256e825aa89800656acb5de75dd73f0b003c3ffe78459ddb1a8bb19.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 607, + 233, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 607, + 233, + 620 + ], + "spans": [ + { + "bbox": [ + 69, + 607, + 233, + 620 + ], + "type": "text", + "content": "This concludes the proof for " + }, + { + "bbox": [ + 69, + 607, + 233, + 620 + ], + "type": "inline_equation", + "content": "K = 2" + }, + { + "bbox": [ + 69, + 607, + 233, + 620 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 68, + 625, + 541, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 625, + 541, + 654 + ], + "spans": [ + { + "bbox": [ + 68, + 625, + 541, + 654 + ], + "type": "text", + "content": "Now assuming the result holds for " + }, + { + "bbox": [ + 68, + 625, + 541, + 654 + ], + "type": "inline_equation", + "content": "K - 1" + }, + { + "bbox": [ + 68, + 625, + 541, + 654 + ], + "type": "text", + "content": " and consider the case for " + }, + { + "bbox": [ + 68, + 625, + 541, + 654 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 68, + 625, + 541, + 654 + ], + "type": "text", + "content": ", First, we choose a small enough constant " + }, + { + "bbox": [ + 68, + 625, + 541, + 654 + ], + "type": "inline_equation", + "content": "C_{\\delta ,\\epsilon ,K,N} > 0" + }, + { + "bbox": [ + 68, + 625, + 541, + 654 + ], + "type": "text", + "content": ", such that when " + }, + { + "bbox": [ + 68, + 625, + 541, + 654 + ], + "type": "inline_equation", + "content": "p_{K - 1}^{(0)} < C_{\\delta ,\\epsilon ,K,N}" + }, + { + "bbox": [ + 68, + 625, + 541, + 654 + ], + "type": "text", + "content": ", the following two random processes are close:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 95, + 672, + 539, + 728 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 96, + 672, + 447, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 672, + 447, + 688 + ], + "spans": [ + { + "bbox": [ + 96, + 672, + 447, + 688 + ], + "type": "text", + "content": "- Running the algorithm for " + }, + { + "bbox": [ + 96, + 672, + 447, + 688 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 96, + 672, + 447, + 688 + ], + "type": "text", + "content": " steps on the " + }, + { + "bbox": [ + 96, + 672, + 447, + 688 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 96, + 672, + 447, + 688 
+ ], + "type": "text", + "content": " arms bandit yields " + }, + { + "bbox": [ + 96, + 672, + 447, + 688 + ], + "type": "inline_equation", + "content": "\\theta_i^{(t)}, i \\in [K]" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 95, + 696, + 539, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 696, + 539, + 728 + ], + "spans": [ + { + "bbox": [ + 95, + 696, + 539, + 728 + ], + "type": "text", + "content": "- Running the algorithm for " + }, + { + "bbox": [ + 95, + 696, + 539, + 728 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 95, + 696, + 539, + 728 + ], + "type": "text", + "content": " steps on a " + }, + { + "bbox": [ + 95, + 696, + 539, + 728 + ], + "type": "inline_equation", + "content": "K - 1" + }, + { + "bbox": [ + 95, + 696, + 539, + 728 + ], + "type": "text", + "content": " arms bandit yields " + }, + { + "bbox": [ + 95, + 696, + 539, + 728 + ], + "type": "inline_equation", + "content": "\\tilde{\\theta}_i^{(t)}, i \\in [K - 1]" + }, + { + "bbox": [ + 95, + 696, + 539, + 728 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 95, + 696, + 539, + 728 + ], + "type": "inline_equation", + "content": "\\tilde{\\theta}_i^{(0)} = \\theta_i^{(0)}, i < K - 1" + }, + { + "bbox": [ + 95, + 696, + 539, + 728 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 95, + 696, + 539, + 728 + ], + "type": "inline_equation", + "content": "\\tilde{\\theta}_{K - 1}^{(0)} = \\theta_K(0)" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 746, + 310, + 755 + ], + "type": "page_number", + "angle": 0, + "lines": [ + 
{ + "bbox": [ + 300, + 746, + 310, + 755 + ], + "spans": [ + { + "bbox": [ + 300, + 746, + 310, + 755 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 76, + 308, + 89 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 76, + 308, + 89 + ], + "spans": [ + { + "bbox": [ + 68, + 76, + 308, + 89 + ], + "type": "text", + "content": "and there exists a joint measure on " + }, + { + "bbox": [ + 68, + 76, + 308, + 89 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 68, + 76, + 308, + 89 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 76, + 308, + 89 + ], + "type": "inline_equation", + "content": "\\tilde{\\theta}" + }, + { + "bbox": [ + 68, + 76, + 308, + 89 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 194, + 94, + 414, + 110 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 94, + 414, + 110 + ], + "spans": [ + { + "bbox": [ + 194, + 94, + 414, + 110 + ], + "type": "interline_equation", + "content": "\\forall i \\in [ K - 2 ], t < N, \\Pr (| p _ {i} ^ {t} - \\tilde {p} _ {i} ^ {t} | > \\epsilon / 2) < \\delta / 6.", + "image_path": "55bfba616e05bf812288c69d8876b6ddee39b2e5e60a5aefda879cfecbd5feea.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 272, + 111, + 414, + 126 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 272, + 111, + 414, + 126 + ], + "spans": [ + { + "bbox": [ + 272, + 111, + 414, + 126 + ], + "type": "interline_equation", + "content": "\\operatorname * {P r} (| p _ {K} ^ {t} - \\tilde {p} _ {K - 1} ^ {t} | > \\epsilon / 2) < \\delta / 6.", + "image_path": "572c6f391423923f00eb57fedd5c685d498bf084b4ad7b1a487a5c5595d86909.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 282, + 129, + 414, + 144 + ], + 
"type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 282, + 129, + 414, + 144 + ], + "spans": [ + { + "bbox": [ + 282, + 129, + 414, + 144 + ], + "type": "interline_equation", + "content": "\\Pr \\left(\\left| p _ {K} ^ {t} - p _ {K} ^ {0} \\right| > \\epsilon / 2\\right) < \\delta / 6.", + "image_path": "3a2e6bad1cb29144c1f3bf2705e2569db4a1b64e5d3e95fc84c11521e6263501.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 155, + 542, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 155, + 542, + 194 + ], + "spans": [ + { + "bbox": [ + 68, + 155, + 542, + 194 + ], + "type": "text", + "content": "This joint measure is constructed by choosing the corresponding arm for two process at each sampling step as long as the sampled arm is not " + }, + { + "bbox": [ + 68, + 155, + 542, + 194 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 68, + 155, + 542, + 194 + ], + "type": "text", + "content": " and uses the uniform convergence on " + }, + { + "bbox": [ + 68, + 155, + 542, + 194 + ], + "type": "inline_equation", + "content": "\\nabla \\log_{\\theta} p_i" + }, + { + "bbox": [ + 68, + 155, + 542, + 194 + ], + "type": "text", + "content": ". 
Now following the same argument at " + }, + { + "bbox": [ + 68, + 155, + 542, + 194 + ], + "type": "inline_equation", + "content": "K = 2" + }, + { + "bbox": [ + 68, + 155, + 542, + 194 + ], + "type": "text", + "content": ", we can show that there exists " + }, + { + "bbox": [ + 68, + 155, + 542, + 194 + ], + "type": "inline_equation", + "content": "\\tilde{T}_{\\epsilon, \\delta, K}" + }, + { + "bbox": [ + 68, + 155, + 542, + 194 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 176, + 199, + 433, + 222 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 199, + 433, + 222 + ], + "spans": [ + { + "bbox": [ + 176, + 199, + 433, + 222 + ], + "type": "interline_equation", + "content": "\\operatorname * {P r} (\\exists t < \\tilde {T} _ {\\epsilon , \\delta , K}, \\min _ {t \\in [ K ]} p _ {t} < C _ {\\delta , \\epsilon , K, T _ {\\epsilon / 2, \\delta / 2, K - 1}}) > 1 - \\delta / 2.", + "image_path": "2129eafde804b7c6253031f96635d28714155c8f61913d221bec62adc0d05819.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 233, + 542, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 233, + 542, + 271 + ], + "spans": [ + { + "bbox": [ + 68, + 233, + 542, + 271 + ], + "type": "text", + "content": "Then we can invoke the induction hypothesis and uses the coupling shown above to show that if we choose " + }, + { + "bbox": [ + 68, + 233, + 542, + 271 + ], + "type": "inline_equation", + "content": "T_{\\epsilon, \\delta, K} = \\tilde{T}_{\\epsilon, \\delta, K} + T_{\\epsilon/2, \\delta/2, K-1}" + }, + { + "bbox": [ + 68, + 233, + 542, + 271 + ], + "type": "text", + "content": ", then there exists a time step that one arm has probability higher than " + }, + { + "bbox": [ + 68, + 233, + 542, + 271 + ], + "type": "inline_equation", + "content": "1 - \\epsilon" + }, + { + "bbox": [ + 68, + 233, + 542, + 271 + ], + "type": "text", + "content": " with 
probability at least " + }, + { + "bbox": [ + 68, + 233, + 542, + 271 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 68, + 233, + 542, + 271 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 529, + 276, + 541, + 287 + ], + "blocks": [ + { + "bbox": [ + 529, + 276, + 541, + 287 + ], + "lines": [ + { + "bbox": [ + 529, + 276, + 541, + 287 + ], + "spans": [ + { + "bbox": [ + 529, + 276, + 541, + 287 + ], + "type": "image", + "image_path": "ca1b539d95c5fb460f2be006fa7134f3f1ba977fdaf654bf1420e28cac93d5b2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 295, + 542, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 295, + 542, + 325 + ], + "spans": [ + { + "bbox": [ + 68, + 295, + 542, + 325 + ], + "type": "text", + "content": "Lemma C.6. The REINFORCE algorithm without KL regularization (" + }, + { + "bbox": [ + 68, + 295, + 542, + 325 + ], + "type": "inline_equation", + "content": "\\beta = 0" + }, + { + "bbox": [ + 68, + 295, + 542, + 325 + ], + "type": "text", + "content": ") is self-enforcing stochastic (Definition C.2) once " + }, + { + "bbox": [ + 68, + 295, + 542, + 325 + ], + "type": "inline_equation", + "content": "p_{K+1}^{(t)} < 1/2" + }, + { + "bbox": [ + 68, + 295, + 542, + 325 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 335, + 338, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 335, + 338, + 348 + ], + "spans": [ + { + "bbox": [ + 69, + 335, + 338, + 348 + ], + "type": "text", + "content": "Proof. 
The REINFORCE algorithm is self-enforcing because" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 209, + 354, + 399, + 384 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 354, + 399, + 384 + ], + "spans": [ + { + "bbox": [ + 209, + 354, + 399, + 384 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} ] = \\eta p _ {i} (r _ {i} - \\sum_ {j \\in [ K + 1 ]} p _ {j} r _ {j}).", + "image_path": "219b5d70f1361d0524e9e43e4ca877a094875ed4025d0d18eae4a0143396e6c9.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 396, + 108, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 396, + 108, + 407 + ], + "spans": [ + { + "bbox": [ + 69, + 396, + 108, + 407 + ], + "type": "text", + "content": "Further," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 262, + 412, + 348, + 430 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 262, + 412, + 348, + 430 + ], + "spans": [ + { + "bbox": [ + 262, + 412, + 348, + 430 + ], + "type": "interline_equation", + "content": "| \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} | \\leq 1", + "image_path": "1c07139c31aa052e9802591db451bb95cd466333559e1496c77fb8de1be1c789.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 68, + 435, + 460, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 435, + 460, + 465 + ], + "spans": [ + { + "bbox": [ + 68, + 435, + 460, + 465 + ], + "type": "text", + "content": "and if we consider the distribution of " + }, + { + "bbox": [ + 68, + 435, + 460, + 465 + ], + "type": "inline_equation", + "content": "\\Delta_{i,i^*,t} = \\frac{\\left(\\theta_i^{(t + 1)} - \\theta_i^{(t)}\\right) - \\left(\\theta_{i^*}^{(t + 1)} - \\theta_{i^*}^{(t)}\\right)}{\\eta}" + }, + { + "bbox": [ + 68, + 435, + 460, + 465 + ], + "type": "text", + "content": ", it holds that" + } + ] + } + ], + "index": 14 + }, + { + 
"bbox": [ + 196, + 470, + 413, + 485 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 470, + 413, + 485 + ], + "spans": [ + { + "bbox": [ + 196, + 470, + 413, + 485 + ], + "type": "interline_equation", + "content": "\\Delta_ {i, i ^ {*}, t} = r _ {I _ {t}} \\left(\\mathbf {1} (i = I _ {t}) - \\mathbf {1} (i ^ {*} = I _ {t}) - p _ {i} + p _ {i ^ {*}}\\right)", + "image_path": "9904b72553ca60a37dd41207977ebf59630a4c6ab09f09ed468ec2b71b8662e2.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 192, + 514, + 416, + 529 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 514, + 416, + 529 + ], + "spans": [ + { + "bbox": [ + 192, + 514, + 416, + 529 + ], + "type": "interline_equation", + "content": "\\Pr \\left(\\Delta_ {i, i ^ {*}, t} = - 1 - p _ {i} + p _ {i} ^ {*}\\right) \\geq \\Pr \\left(I _ {t} = i ^ {*}\\right) = p _ {i ^ {*}}", + "image_path": "2afd9d9121757c862096b1a3d7ace4ff98bfb6719ea911ea906426aa924ec8b7.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 540, + 117, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 540, + 117, + 551 + ], + "spans": [ + { + "bbox": [ + 69, + 540, + 117, + 551 + ], + "type": "text", + "content": "Therefore" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 227, + 556, + 380, + 602 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 556, + 380, + 602 + ], + "spans": [ + { + "bbox": [ + 227, + 556, + 380, + 602 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} \\left[ \\Delta_ {i, i ^ {*}, t} ^ {2} \\right] \\geq p _ {i ^ {*}} \\left(- 1 - p _ {i} + p _ {i} ^ {*}\\right) ^ {2} \\\\ \\geq p _ {i ^ {*}} (1 - p _ {i ^ {*}}) ^ {2} \\geq \\frac {\\epsilon^ {2}}{2 K}. 
\\\\ \\end{array}", + "image_path": "7cfd0f10650b5e5d393aae358e8073bb53dac9ce247d2aced5c9cee49e6ed744.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 68, + 607, + 342, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 607, + 342, + 620 + ], + "spans": [ + { + "bbox": [ + 68, + 607, + 342, + 620 + ], + "type": "text", + "content": "This then concludes the proof with " + }, + { + "bbox": [ + 68, + 607, + 342, + 620 + ], + "type": "inline_equation", + "content": "C_1 = \\eta / 2K" + }, + { + "bbox": [ + 68, + 607, + 342, + 620 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 607, + 342, + 620 + ], + "type": "inline_equation", + "content": "C_2 = \\eta" + }, + { + "bbox": [ + 68, + 607, + 342, + 620 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 68, + 625, + 542, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 625, + 542, + 656 + ], + "spans": [ + { + "bbox": [ + 68, + 625, + 542, + 656 + ], + "type": "text", + "content": "Lemma C.7. The GRPO algorithm without KL regularization (" + }, + { + "bbox": [ + 68, + 625, + 542, + 656 + ], + "type": "inline_equation", + "content": "\\beta = 0" + }, + { + "bbox": [ + 68, + 625, + 542, + 656 + ], + "type": "text", + "content": ") is self-enforcing stochastic (Definition C.2) once " + }, + { + "bbox": [ + 68, + 625, + 542, + 656 + ], + "type": "inline_equation", + "content": "p_{K+1}^{(t)} < 1/2" + }, + { + "bbox": [ + 68, + 625, + 542, + 656 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 666, + 309, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 666, + 309, + 679 + ], + "spans": [ + { + "bbox": [ + 69, + 666, + 309, + 679 + ], + "type": "text", + "content": "Proof. 
The GRPO algorithm is self-enforcing because" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 77, + 685, + 531, + 705 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 685, + 531, + 705 + ], + "spans": [ + { + "bbox": [ + 77, + 685, + 531, + 705 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} ] = \\eta \\mathbb {E} [ \\tilde {r} _ {t} ^ {(g)} (\\mathbf {1} (I _ {t} ^ {(g)} = i) - p _ {i} ^ {(t)}) ] = \\eta \\mathbb {E} [ \\tilde {r} _ {t} ^ {(g)} \\mathbf {1} (I _ {t} ^ {(g)} = i) ] = \\eta \\mathbb {E} _ {\\mu_ {t}} [ \\mathbb {E} [ \\tilde {r} _ {t} ^ {(g)} \\mathbf {1} (I _ {t} ^ {(g)} = i) | \\mu_ {t} ] ].", + "image_path": "973273b93ff0af969d810af2019c0e62c8240895ed9620fb5e5ac263b61fb546.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 68, + 713, + 446, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 713, + 446, + 728 + ], + "spans": [ + { + "bbox": [ + 68, + 713, + 446, + 728 + ], + "type": "text", + "content": "Noted that " + }, + { + "bbox": [ + 68, + 713, + 446, + 728 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\tilde{r}_t^{(g)}\\mathbf{1}(I_t^{(g)} = i)|\\mu_t]" + }, + { + "bbox": [ + 68, + 713, + 446, + 728 + ], + "type": "text", + "content": " is monotonous with " + }, + { + "bbox": [ + 68, + 713, + 446, + 728 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 68, + 713, + 446, + 728 + ], + "type": "text", + "content": ", hence monotonous with " + }, + { + "bbox": [ + 68, + 713, + 446, + 728 + ], + "type": "inline_equation", + "content": "\\theta_{i}" + }, + { + "bbox": [ + 68, + 713, + 446, + 728 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 746, + 310, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 746, + 310, + 756 + ], + "spans": [ + { + "bbox": [ + 300, + 746, + 310, + 756 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 76, + 106, + 87 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 76, + 106, + 87 + ], + "spans": [ + { + "bbox": [ + 69, + 76, + 106, + 87 + ], + "type": "text", + "content": "Further" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 194, + 90, + 417, + 136 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 90, + 417, + 136 + ], + "spans": [ + { + "bbox": [ + 194, + 90, + 417, + 136 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} | \\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} | \\leq \\eta \\max _ {g} | \\tilde {r} _ {t} ^ {(g)} (\\mathbf {1} (I _ {t} ^ {(g)} = i) - p _ {i} ^ {(t)}) | \\\\ \\leq \\eta \\max _ {g} | \\tilde {r} _ {t} ^ {(g)} | \\leq \\eta \\sqrt {G}. 
\\\\ \\end{array}", + "image_path": "2f678190c34bc263f209d564e733b949a49f0c5b80c07037e411d4cdccc2776b.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 145, + 345, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 145, + 345, + 157 + ], + "spans": [ + { + "bbox": [ + 69, + 145, + 345, + 157 + ], + "type": "text", + "content": "Now we only need to lower bound the second momentum of" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 207, + 160, + 402, + 196 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 160, + 402, + 196 + ], + "spans": [ + { + "bbox": [ + 207, + 160, + 402, + 196 + ], + "type": "interline_equation", + "content": "\\Delta_ {i, i ^ {*}, t} = \\frac {\\left(\\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)}\\right) - \\left(\\theta_ {i ^ {*}} ^ {(t + 1)} - \\theta_ {i ^ {*}} ^ {(t)}\\right)}{\\eta}", + "image_path": "5dcada9b466ab9fe7f31300e240f281a0a980bf61c1e1d8185ad399f27eb0286.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 210, + 120, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 210, + 120, + 221 + ], + "spans": [ + { + "bbox": [ + 69, + 210, + 120, + 221 + ], + "type": "text", + "content": "Noted that" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 219, + 224, + 390, + 259 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 224, + 390, + 259 + ], + "spans": [ + { + "bbox": [ + 219, + 224, + 390, + 259 + ], + "type": "interline_equation", + "content": "\\theta_ {i} ^ {(t + 1)} - \\theta_ {i} ^ {(t)} = \\frac {\\eta}{G} \\sum_ {g = 1} ^ {G} \\tilde {r} _ {t} ^ {(g)} \\mathbf {1} (I _ {t} ^ {(g)} = i).", + "image_path": "0f296249734c6e78fe73f1f7ad3efd2ffa14c033f2625ceb5f3ec2ec4cadfdf1.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 269, + 126, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 269, + 126, + 280 + ], + "spans": [ 
+ { + "bbox": [ + 69, + 269, + 126, + 280 + ], + "type": "text", + "content": "It holds that" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 159, + 282, + 451, + 321 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 282, + 451, + 321 + ], + "spans": [ + { + "bbox": [ + 159, + 282, + 451, + 321 + ], + "type": "interline_equation", + "content": "\\sigma_ {t} = \\sqrt {\\frac {1}{G} \\sum_ {g} (r _ {t} ^ {g} - \\mu) ^ {2}} = \\sqrt {\\frac {1}{G} \\sum_ {g} r _ {t} ^ {g} - 2 \\mu r _ {t} ^ {g} + \\mu^ {2}} = \\sqrt {\\mu - \\mu^ {2}}.", + "image_path": "69f407d3727977dd3ff9ce3bea90b6e7f5e959bfd3caf47043b8ae66bc1639f6.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 331, + 186, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 331, + 186, + 346 + ], + "spans": [ + { + "bbox": [ + 69, + 331, + 186, + 346 + ], + "type": "text", + "content": "Therefore when " + }, + { + "bbox": [ + 69, + 331, + 186, + 346 + ], + "type": "inline_equation", + "content": "r_t^{(g)} > 0" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 186, + 350, + 423, + 384 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 350, + 423, + 384 + ], + "spans": [ + { + "bbox": [ + 186, + 350, + 423, + 384 + ], + "type": "interline_equation", + "content": "\\tilde {r} _ {t} ^ {(g)} = \\frac {r _ {t} ^ {(g)} - \\mu_ {t}}{\\sigma_ {t}} = \\frac {1 - \\mu_ {t}}{\\sigma_ {t}} = \\sqrt {\\frac {1 - \\mu_ {t}}{\\mu_ {t}}} \\geq \\sqrt {\\frac {1}{G - 1}}.", + "image_path": "88f304b72e16859b9ddb0909f86dcbdda34b432dc64f59e1b583d5f961c70d85.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 394, + 392, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 394, + 392, + 410 + ], + "spans": [ + { + "bbox": [ + 69, + 394, + 392, + 410 + ], + "type": "text", + "content": "Because all " + }, + { + "bbox": [ + 69, + 394, + 392, + 410 + ], + "type": 
"inline_equation", + "content": "\\tilde{r}_t^{(g)}" + }, + { + "bbox": [ + 69, + 394, + 392, + 410 + ], + "type": "text", + "content": " are the same when " + }, + { + "bbox": [ + 69, + 394, + 392, + 410 + ], + "type": "inline_equation", + "content": "r_t^{(g)} > 0" + }, + { + "bbox": [ + 69, + 394, + 392, + 410 + ], + "type": "text", + "content": ", it holds that when " + }, + { + "bbox": [ + 69, + 394, + 392, + 410 + ], + "type": "inline_equation", + "content": "i \\in [K]" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 179, + 414, + 431, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 414, + 431, + 495 + ], + "spans": [ + { + "bbox": [ + 179, + 414, + 431, + 495 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\Delta_ {i, i ^ {*}, t} ^ {2} = \\frac {1}{G} \\frac {1 - \\mu_ {t}}{\\mu_ {t}} \\left(\\sum_ {g = 1} ^ {G} {\\bf 1} (I _ {t} ^ {(g)} = i) - {\\bf 1} (I _ {t} ^ {(g)} = i ^ {*})\\right) ^ {2} \\\\ \\geq \\frac {1}{G (G - 1)} \\left(\\sum_ {g = 1} ^ {G} \\mathbf {1} \\left(I _ {t} ^ {(g)} = i\\right) - \\mathbf {1} \\left(I _ {t} ^ {(g)} = i ^ {*}\\right)\\right) ^ {2}. 
\\\\ \\end{array}", + "image_path": "0c94c758263527048a63f3968a11597da849450b1792cda84ac408bad5f98b58.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 504, + 149, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 504, + 149, + 517 + ], + "spans": [ + { + "bbox": [ + 69, + 504, + 149, + 517 + ], + "type": "text", + "content": "This then implies" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 139, + 520, + 468, + 565 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 520, + 468, + 565 + ], + "spans": [ + { + "bbox": [ + 139, + 520, + 468, + 565 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\Delta_ {i, i ^ {*}, t} ^ {2} ] \\geq \\frac {1}{G (G - 1)} \\mathbb {E} \\left[ \\left(\\sum_ {g = 1} ^ {G} {\\bf 1} (I _ {t} ^ {(g)} = i) - {\\bf 1} (I _ {t} ^ {(g)} = i ^ {*})\\right) ^ {2} \\Big | \\mu_ {t} \\neq 1, 0 \\right]", + "image_path": "e6f83b4a99c6422bf8387b129df86c65e1957eac012f355da11bc5a79b5e0f61.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 574, + 394, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 574, + 394, + 590 + ], + "spans": [ + { + "bbox": [ + 69, + 574, + 394, + 590 + ], + "type": "text", + "content": "One can without loss of generality assume " + }, + { + "bbox": [ + 69, + 574, + 394, + 590 + ], + "type": "inline_equation", + "content": "I_{t}^{(G)} = K + 1" + }, + { + "bbox": [ + 69, + 574, + 394, + 590 + ], + "type": "text", + "content": " and show that" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 148, + 594, + 462, + 666 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 594, + 462, + 666 + ], + "spans": [ + { + "bbox": [ + 148, + 594, + 462, + 666 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} \\left[ \\Delta_ {i, i ^ {*}, t} ^ {2} \\right] \\geq \\frac {1}{G (G - 1)} \\mathbb {E} \\left[ \\left(\\sum_ {g = 
1} ^ {G - 1} \\mathbf {1} \\left(I _ {t} ^ {(g)} = i\\right) - \\mathbf {1} \\left(I _ {t} ^ {(g)} = i ^ {*}\\right)\\right) ^ {2} \\right] \\\\ \\geq \\frac {1}{G} \\mathbb {E} \\left[ \\left(\\mathbf {1} \\left(I _ {t} ^ {(1)} = i\\right) - \\mathbf {1} \\left(I _ {t} ^ {(1)} = i ^ {*}\\right)\\right) ^ {2} \\right] = \\frac {p _ {i} + p _ {i} ^ {*}}{G} \\geq \\frac {1}{2 K G}. \\\\ \\end{array}", + "image_path": "34dd8473c7bea803c37a13a33b349b2cd610686e846013957bdec7e504f82175.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 68, + 675, + 541, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 675, + 541, + 711 + ], + "spans": [ + { + "bbox": [ + 68, + 675, + 541, + 711 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 68, + 675, + 541, + 711 + ], + "type": "inline_equation", + "content": "i \\neq K" + }, + { + "bbox": [ + 68, + 675, + 541, + 711 + ], + "type": "text", + "content": ", noted that " + }, + { + "bbox": [ + 68, + 675, + 541, + 711 + ], + "type": "inline_equation", + "content": "\\left(\\theta_{i}^{(t+1)} - \\theta_{i}^{(t)}\\right) - \\left(\\theta_{i^{*}}^{(t+1)} - \\theta_{i^{*}}^{(t)}\\right) > \\left(\\theta_{i}^{(t+1)} - \\theta_{i}^{(t)}\\right) > 0" + }, + { + "bbox": [ + 68, + 675, + 541, + 711 + ], + "type": "text", + "content": ". Therefore, a similar bound can show that " + }, + { + "bbox": [ + 68, + 675, + 541, + 711 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\Delta_{i,i^{*},t}^{2}] > \\frac{1}{2KG}" + }, + { + "bbox": [ + 68, + 675, + 541, + 711 + ], + "type": "text", + "content": ". 
This then concludes the proof with " + }, + { + "bbox": [ + 68, + 675, + 541, + 711 + ], + "type": "inline_equation", + "content": "C_{1} = \\eta / 2KG" + }, + { + "bbox": [ + 68, + 675, + 541, + 711 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 675, + 541, + 711 + ], + "type": "inline_equation", + "content": "C_{2} = \\sqrt{G}" + }, + { + "bbox": [ + 68, + 675, + 541, + 711 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 529, + 715, + 540, + 725 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 529, + 715, + 540, + 725 + ], + "spans": [ + { + "bbox": [ + 529, + 715, + 540, + 725 + ], + "type": "text", + "content": "□" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 746, + 310, + 755 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 746, + 310, + 755 + ], + "spans": [ + { + "bbox": [ + 299, + 746, + 310, + 755 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 76, + 361, + 90 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 76, + 361, + 90 + ], + "spans": [ + { + "bbox": [ + 68, + 76, + 361, + 90 + ], + "type": "text", + "content": "C.4 Diversity Never Improves with KL regularization" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "spans": [ + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "text", + "content": 
"Theorem C.8 (Diversity Preservation under KL Regularization). With " + }, + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "inline_equation", + "content": "p_0" + }, + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "text", + "content": " as the initial policy and KL regularization hyperparameter " + }, + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "inline_equation", + "content": "\\beta > 0" + }, + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "text", + "content": ", if the REINFORCE process converges to policy " + }, + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "inline_equation", + "content": "p^*" + }, + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "text", + "content": ". Then, " + }, + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "inline_equation", + "content": "p^*" + }, + { + "bbox": [ + 68, + 99, + 542, + 125 + ], + "type": "text", + "content": " satisfies:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 198, + 129, + 411, + 161 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 129, + 411, + 161 + ], + "spans": [ + { + "bbox": [ + 198, + 129, + 411, + 161 + ], + "type": "interline_equation", + "content": "\\frac {p ^ {*} (i)}{\\sum_ {j = 1} ^ {K} p ^ {*} (j)} = \\frac {p _ {0} (i)}{\\sum_ {j = 1} ^ {K} p _ {0} (j)} \\quad \\forall i \\in \\{1, \\dots , K \\}.", + "image_path": "c02a1a4e88be241a1b94435af768afb77cb72042c3e9af76fe5be4ac8f1b8eeb.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 166, + 542, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 166, + 542, + 190 + ], + "spans": [ + { + "bbox": [ + 68, + 166, + 542, + 190 + ], + "type": "text", + "content": "Consequently, the distribution over the optimal arms under " + }, + { + "bbox": [ + 68, + 166, + 542, + 190 + ], + "type": "inline_equation", + "content": "p^*" + }, + { + "bbox": [ + 68, + 166, + 542, + 190 + ], + "type": "text", + "content": " 
matches the initial distribution " + }, + { + "bbox": [ + 68, + 166, + 542, + 190 + ], + "type": "inline_equation", + "content": "p_0" + }, + { + "bbox": [ + 68, + 166, + 542, + 190 + ], + "type": "text", + "content": " restricted to these arms and renormalized." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 203, + 541, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 203, + 541, + 228 + ], + "spans": [ + { + "bbox": [ + 68, + 203, + 541, + 228 + ], + "type": "text", + "content": "Proof. Using policy gradient theorem, we know that the converged policy " + }, + { + "bbox": [ + 68, + 203, + 541, + 228 + ], + "type": "inline_equation", + "content": "p^*" + }, + { + "bbox": [ + 68, + 203, + 541, + 228 + ], + "type": "text", + "content": " and corresponding parameter " + }, + { + "bbox": [ + 68, + 203, + 541, + 228 + ], + "type": "inline_equation", + "content": "\\theta^*" + }, + { + "bbox": [ + 68, + 203, + 541, + 228 + ], + "type": "text", + "content": " satisfy that," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 213, + 233, + 397, + 270 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 213, + 233, + 397, + 270 + ], + "spans": [ + { + "bbox": [ + 213, + 233, + 397, + 270 + ], + "type": "interline_equation", + "content": "\\nabla_ {\\theta} \\left[ \\sum_ {i = 1} ^ {K + 1} r _ {i} p _ {i} + \\beta \\mathrm {K L} \\left(p | p ^ {0}\\right) \\right] \\Bigg | _ {\\theta = \\theta^ {*}} = 0", + "image_path": "083eced91fb46849603d1ad72fa239d4acbfb899ad82936da65f408e5a04fcc8.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 280, + 219, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 280, + 219, + 293 + ], + "spans": [ + { + "bbox": [ + 68, + 280, + 219, + 293 + ], + "type": "text", + "content": "This then suggests that for any " + }, + { + "bbox": [ + 68, + 280, + 219, + 293 + ], + "type": "inline_equation", + "content": "k" + } + ] + } + ], + 
"index": 7 + }, + { + "bbox": [ + 173, + 299, + 437, + 334 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 299, + 437, + 334 + ], + "spans": [ + { + "bbox": [ + 173, + 299, + 437, + 334 + ], + "type": "interline_equation", + "content": "r _ {k} p _ {k} ^ {*} - p _ {k} ^ {*} \\sum_ {i = 1} ^ {K + 1} r _ {i} ^ {*} p _ {i} ^ {*} + \\beta \\sum_ {i = 1} ^ {K + 1} \\nabla_ {\\theta_ {k}} [ p _ {i} \\log p _ {i} - p _ {i} \\log p _ {i} ^ {0} ] = 0", + "image_path": "cc05d29f10029f117c9149098f2eea1f4f41e939ebff5641009f894692dd280d.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 345, + 163, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 345, + 163, + 357 + ], + "spans": [ + { + "bbox": [ + 68, + 345, + 163, + 357 + ], + "type": "text", + "content": "This is equivalent to" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 363, + 468, + 397 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 363, + 468, + 397 + ], + "spans": [ + { + "bbox": [ + 141, + 363, + 468, + 397 + ], + "type": "interline_equation", + "content": "r _ {k} p _ {k} ^ {*} - p _ {k} ^ {*} \\sum_ {i = 1} ^ {K + 1} r _ {i} ^ {*} p _ {i} ^ {*} + \\beta \\sum_ {i = 1} ^ {K + 1} (\\mathbf {1} (i = k) - p _ {k} ^ {*}) p _ {i} ^ {*} (\\log p _ {i} ^ {*} + 1 - \\log p _ {i} ^ {0}) = 0", + "image_path": "ff593e0e957fad7b1f7dc96059bb205874bd6f33868700925b2f9d68f785835d.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 409, + 126, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 409, + 126, + 422 + ], + "spans": [ + { + "bbox": [ + 68, + 409, + 126, + 422 + ], + "type": "text", + "content": "Simplifying" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 140, + 442, + 469, + 477 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 442, + 469, + 477 + ], + "spans": [ + { + "bbox": [ + 140, + 442, + 
469, + 477 + ], + "type": "interline_equation", + "content": "r _ {k} + \\beta (\\log p _ {k} ^ {*} + 1 - \\log p _ {0}) = \\sum_ {i = 1} ^ {K + 1} r _ {i} ^ {*} p _ {i} ^ {*} + \\beta \\sum_ {i = 1} ^ {K + 1} p _ {i} ^ {*} (\\log p _ {i} ^ {*} + 1 - \\log p _ {i} ^ {0})", + "image_path": "010df19603f6a8b8aed4c150ff7104474ebf1313d06df35b56b879fe8d0142f9.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 490, + 542, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 490, + 542, + 521 + ], + "spans": [ + { + "bbox": [ + 67, + 490, + 542, + 521 + ], + "type": "text", + "content": "For all " + }, + { + "bbox": [ + 67, + 490, + 542, + 521 + ], + "type": "inline_equation", + "content": "k \\in [K]" + }, + { + "bbox": [ + 67, + 490, + 542, + 521 + ], + "type": "text", + "content": ", we know that " + }, + { + "bbox": [ + 67, + 490, + 542, + 521 + ], + "type": "inline_equation", + "content": "r_k" + }, + { + "bbox": [ + 67, + 490, + 542, + 521 + ], + "type": "text", + "content": " is equivalent, therefore, " + }, + { + "bbox": [ + 67, + 490, + 542, + 521 + ], + "type": "inline_equation", + "content": "\\frac{p_k^*(i)}{p_0^*(i)}" + }, + { + "bbox": [ + 67, + 490, + 542, + 521 + ], + "type": "text", + "content": " is a constant for " + }, + { + "bbox": [ + 67, + 490, + 542, + 521 + ], + "type": "inline_equation", + "content": "k \\in [K]" + }, + { + "bbox": [ + 67, + 490, + 542, + 521 + ], + "type": "text", + "content": ", concluding our proof." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 534, + 192, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 534, + 192, + 547 + ], + "spans": [ + { + "bbox": [ + 69, + 534, + 192, + 547 + ], + "type": "text", + "content": "C.5 Technical Lemma" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 68, + 558, + 279, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 558, + 279, + 571 + ], + "spans": [ + { + "bbox": [ + 68, + 558, + 279, + 571 + ], + "type": "text", + "content": "Lemma C.9. For " + }, + { + "bbox": [ + 68, + 558, + 279, + 571 + ], + "type": "inline_equation", + "content": "x\\in \\mathbb{R}" + }, + { + "bbox": [ + 68, + 558, + 279, + 571 + ], + "type": "inline_equation", + "content": "|x| < C" + }, + { + "bbox": [ + 68, + 558, + 279, + 571 + ], + "type": "text", + "content": " , it holds that" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 247, + 577, + 363, + 592 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 577, + 363, + 592 + ], + "spans": [ + { + "bbox": [ + 247, + 577, + 363, + 592 + ], + "type": "interline_equation", + "content": "\\exp (x) > 1 + x + A _ {C} x ^ {2}", + "image_path": "106be94ca90f21b22c1046ac94af48a64aa8412f6ffde8449c502aa85c296ee4.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 597, + 185, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 597, + 185, + 616 + ], + "spans": [ + { + "bbox": [ + 69, + 597, + 185, + 616 + ], + "type": "text", + "content": "here " + }, + { + "bbox": [ + 69, + 597, + 185, + 616 + ], + "type": "inline_equation", + "content": "A_{C} = \\frac{\\exp(-C) + C - 1}{C^{2}}" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 68, + 628, + 446, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 628, + 446, + 645 + ], + "spans": [ + { + "bbox": [ + 68, + 628, + 446, + 645 + ], + "type": "text", + "content": 
"Proof. Define " + }, + { + "bbox": [ + 68, + 628, + 446, + 645 + ], + "type": "inline_equation", + "content": "g(x) = \\frac{\\exp(x) - 1 - x}{x^2}" + }, + { + "bbox": [ + 68, + 628, + 446, + 645 + ], + "type": "text", + "content": ", this function monotonically increases when " + }, + { + "bbox": [ + 68, + 628, + 446, + 645 + ], + "type": "inline_equation", + "content": "x < 0" + }, + { + "bbox": [ + 68, + 628, + 446, + 645 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 263, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 746, + 310, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 746, + 310, + 756 + ], + "spans": [ + { + "bbox": [ + 299, + 746, + 310, + 756 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 76, + 274, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 76, + 274, + 92 + ], + "spans": [ + { + "bbox": [ + 69, + 76, + 274, + 92 + ], + "type": "text", + "content": "D Open-Thoughts Evaluation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 104, + 541, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 104, + 541, + 142 + ], + "spans": [ + { + "bbox": [ + 67, + 104, + 541, + 142 + ], + "type": "text", + "content": "We finetune Qwen2.5-7B-Instruct over OpenThoughts-114k for 5 epochs using BF16 and AdamW and hyperparameters lr=1e-5, bs=128, warmup=150 steps. We sample 40 reasoning traces with temperature set to 0.7 for each of the 30 problems in AIME24. 
Then we evaluate the following quantities." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 119, + 172, + 242, + 291 + ], + "blocks": [ + { + "bbox": [ + 246, + 152, + 360, + 165 + ], + "lines": [ + { + "bbox": [ + 246, + 152, + 360, + 165 + ], + "spans": [ + { + "bbox": [ + 246, + 152, + 360, + 165 + ], + "type": "text", + "content": "Competition Math (AIME24)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 119, + 172, + 242, + 291 + ], + "lines": [ + { + "bbox": [ + 119, + 172, + 242, + 291 + ], + "spans": [ + { + "bbox": [ + 119, + 172, + 242, + 291 + ], + "type": "image", + "image_path": "73d34b55c39b755f68e6950c8eebdf29d21cb222617fda1fe97a55a0270a9208.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 245, + 172, + 366, + 290 + ], + "blocks": [ + { + "bbox": [ + 245, + 172, + 366, + 290 + ], + "lines": [ + { + "bbox": [ + 245, + 172, + 366, + 290 + ], + "spans": [ + { + "bbox": [ + 245, + 172, + 366, + 290 + ], + "type": "image", + "image_path": "5aa59d6e2aba258b1051ce25b0904b17cd2e7490d5e852f24f61c5460a902111.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 370, + 172, + 492, + 291 + ], + "blocks": [ + { + "bbox": [ + 370, + 172, + 492, + 291 + ], + "lines": [ + { + "bbox": [ + 370, + 172, + 492, + 291 + ], + "spans": [ + { + "bbox": [ + 370, + 172, + 492, + 291 + ], + "type": "image", + "image_path": "b920292dcecb0dc0a1bc2d82f9a4af452fedf7363369eff449048c35d2f2d1b2.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 301, + 542, + 340 + ], + "lines": [ + { + "bbox": [ + 68, + 301, + 542, + 340 + ], + "spans": [ + { + "bbox": [ + 68, + 301, + 542, + 340 + ], + "type": "text", + "content": "Figure 10: Pass@K Evaluated on AIME24 over OpenThoughts-114K SFT checkpoints. 
We plot the expected Pass@K ± SD. Note that improvements in Pass@K slows down while Pass@1 improves at a constant rate. Furthermore, the confidence interval of Pass@1 widens, meaning the variance increases during SFT." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 72, + 356, + 164, + 445 + ], + "blocks": [ + { + "bbox": [ + 72, + 356, + 164, + 445 + ], + "lines": [ + { + "bbox": [ + 72, + 356, + 164, + 445 + ], + "spans": [ + { + "bbox": [ + 72, + 356, + 164, + 445 + ], + "type": "image", + "image_path": "5667e64bdc969d25573bf150b13c2046ca2168b091045a68f3bfe49c56c2ea9e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 454, + 542, + 491 + ], + "lines": [ + { + "bbox": [ + 67, + 454, + 542, + 491 + ], + "spans": [ + { + "bbox": [ + 67, + 454, + 542, + 491 + ], + "type": "text", + "content": "Figure 11: Histogram of Pass@1 over AIME24. Variance of Pass@1 increases over finetuning on OpenThoughts-114K. We note that since AIME24 only has 30 questions, the density plot may not be completely reliable." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 166, + 356, + 258, + 445 + ], + "blocks": [ + { + "bbox": [ + 166, + 356, + 258, + 445 + ], + "lines": [ + { + "bbox": [ + 166, + 356, + 258, + 445 + ], + "spans": [ + { + "bbox": [ + 166, + 356, + 258, + 445 + ], + "type": "image", + "image_path": "ce0b365e2d0cfdbc6033585c43605d8fc9a10bbe002ac2b4022da555660ffbd8.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 260, + 357, + 352, + 444 + ], + "blocks": [ + { + "bbox": [ + 260, + 357, + 352, + 444 + ], + "lines": [ + { + "bbox": [ + 260, + 357, + 352, + 444 + ], + "spans": [ + { + "bbox": [ + 260, + 357, + 352, + 444 + ], + "type": "image", + "image_path": "a68455b7785772599da90654e44c982cadd4ddd78f610dab8af5816a70e0c43c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 354, + 357, + 445, + 444 + ], + "blocks": [ + { + "bbox": [ + 354, + 357, + 445, + 444 + ], + "lines": [ + { + "bbox": [ + 354, + 357, + 445, + 444 + ], + "spans": [ + { + "bbox": [ + 354, + 357, + 445, + 444 + ], + "type": "image", + "image_path": "25ebeb290d7259127374c9c05e2e0ce5075d7b65dfef8be8699ed49268e3b01a.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 449, + 357, + 539, + 444 + ], + "blocks": [ + { + "bbox": [ + 449, + 357, + 539, + 444 + ], + "lines": [ + { + "bbox": [ + 449, + 357, + 539, + 444 + ], + "spans": [ + { + "bbox": [ + 449, + 357, + 539, + 444 + ], + "type": "image", + "image_path": "3eb53d8c924bb92658ec5635c7b2ae63fb3273756bce235deb3ec5596f40bd57.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 166, + 510, + 443, + 659 + ], + "blocks": [ + { + 
"bbox": [ + 166, + 510, + 443, + 659 + ], + "lines": [ + { + "bbox": [ + 166, + 510, + 443, + 659 + ], + "spans": [ + { + "bbox": [ + 166, + 510, + 443, + 659 + ], + "type": "image", + "image_path": "bb1d37404a819e2ad64f45cefe623549253879023f3bf030ffe2894bde9a30af.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 671, + 541, + 697 + ], + "lines": [ + { + "bbox": [ + 68, + 671, + 541, + 697 + ], + "spans": [ + { + "bbox": [ + 68, + 671, + 541, + 697 + ], + "type": "text", + "content": "Figure 12: We plot the average number of unique answers sampled over the total number samples i.e. " + }, + { + "bbox": [ + 68, + 671, + 541, + 697 + ], + "type": "inline_equation", + "content": "\\left|\\left\\{y_{i}\\right\\}_{i=1}^{n}\\right| / n" + }, + { + "bbox": [ + 68, + 671, + 541, + 697 + ], + "type": "text", + "content": ". Model samples less diverse number of answers as SFT progresses." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "spans": [ + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 75, + 263, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 75, + 263, + 92 + ], + "spans": [ + { + "bbox": [ + 69, + 75, + 263, + 92 + ], + "type": "text", + "content": "E Interpolation 
Coefficients" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 72, + 123, + 179, + 223 + ], + "blocks": [ + { + "bbox": [ + 72, + 123, + 179, + 223 + ], + "lines": [ + { + "bbox": [ + 72, + 123, + 179, + 223 + ], + "spans": [ + { + "bbox": [ + 72, + 123, + 179, + 223 + ], + "type": "image", + "image_path": "408e81cdfc67395009c30c39aad9c4a31fa71dc028099dd11d2d322e044cc302.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 187, + 122, + 294, + 222 + ], + "blocks": [ + { + "bbox": [ + 82, + 111, + 190, + 122 + ], + "lines": [ + { + "bbox": [ + 82, + 111, + 190, + 122 + ], + "spans": [ + { + "bbox": [ + 82, + 111, + 190, + 122 + ], + "type": "text", + "content": "WiSE-Step672 on MATH500" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 187, + 122, + 294, + 222 + ], + "lines": [ + { + "bbox": [ + 187, + 122, + 294, + 222 + ], + "spans": [ + { + "bbox": [ + 187, + 122, + 294, + 222 + ], + "type": "image", + "image_path": "5cc7d3e6e8dc1b3da5bcfeaac5e13c685f56aae0ab567a53aa222a84743494fd.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 302, + 121, + 408, + 222 + ], + "blocks": [ + { + "bbox": [ + 199, + 112, + 305, + 121 + ], + "lines": [ + { + "bbox": [ + 199, + 112, + 305, + 121 + ], + "spans": [ + { + "bbox": [ + 199, + 112, + 305, + 121 + ], + "type": "text", + "content": "WiSE-Step672 on MATH500" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 312, + 112, + 420, + 121 + ], + "lines": [ + { + "bbox": [ + 312, + 112, + 420, + 121 + ], + "spans": [ + { + "bbox": [ + 312, + 112, + 420, + 121 + ], + "type": "text", + "content": "WiSE-Step672 on MATH500" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 302, + 121, + 408, + 222 + ], + "lines": [ + 
{ + "bbox": [ + 302, + 121, + 408, + 222 + ], + "spans": [ + { + "bbox": [ + 302, + 121, + 408, + 222 + ], + "type": "image", + "image_path": "b592a7e7ccb7f34cab0c0fe80f8b46299a170f5e015417c6a260678ca37e45d5.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 416, + 122, + 523, + 222 + ], + "blocks": [ + { + "bbox": [ + 427, + 112, + 534, + 121 + ], + "lines": [ + { + "bbox": [ + 427, + 112, + 534, + 121 + ], + "spans": [ + { + "bbox": [ + 427, + 112, + 534, + 121 + ], + "type": "text", + "content": "WiSE-Step672 on MATH500" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 416, + 122, + 523, + 222 + ], + "lines": [ + { + "bbox": [ + 416, + 122, + 523, + 222 + ], + "spans": [ + { + "bbox": [ + 416, + 122, + 523, + 222 + ], + "type": "image", + "image_path": "e15bf9634190a320923c26e9e93036be5f32c027f6a9256280b04a259db1412f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 72, + 236, + 179, + 336 + ], + "blocks": [ + { + "bbox": [ + 84, + 226, + 190, + 236 + ], + "lines": [ + { + "bbox": [ + 84, + 226, + 190, + 236 + ], + "spans": [ + { + "bbox": [ + 84, + 226, + 190, + 236 + ], + "type": "text", + "content": "WiSE-Step896 on MATH500" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 72, + 236, + 179, + 336 + ], + "lines": [ + { + "bbox": [ + 72, + 236, + 179, + 336 + ], + "spans": [ + { + "bbox": [ + 72, + 236, + 179, + 336 + ], + "type": "image", + "image_path": "a8fef11dae9deb43d79b665e01086d9767e0568d82dec02ac4133daecf6e053a.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 187, + 236, + 294, + 336 + ], + "blocks": [ + { + "bbox": [ + 198, + 226, + 305, + 236 + ], + "lines": [ + { + "bbox": [ + 198, + 226, + 305, + 236 
+ ], + "spans": [ + { + "bbox": [ + 198, + 226, + 305, + 236 + ], + "type": "text", + "content": "WiSE-Step896 on MATH500" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 187, + 236, + 294, + 336 + ], + "lines": [ + { + "bbox": [ + 187, + 236, + 294, + 336 + ], + "spans": [ + { + "bbox": [ + 187, + 236, + 294, + 336 + ], + "type": "image", + "image_path": "b5a0036c7749a83fbea586e2ad026ccfe84a4e95cb087bdcfd3c7171f8cbc3f2.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 302, + 236, + 408, + 336 + ], + "blocks": [ + { + "bbox": [ + 313, + 226, + 419, + 236 + ], + "lines": [ + { + "bbox": [ + 313, + 226, + 419, + 236 + ], + "spans": [ + { + "bbox": [ + 313, + 226, + 419, + 236 + ], + "type": "text", + "content": "WiSE-Step896 on MATH500" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 302, + 236, + 408, + 336 + ], + "lines": [ + { + "bbox": [ + 302, + 236, + 408, + 336 + ], + "spans": [ + { + "bbox": [ + 302, + 236, + 408, + 336 + ], + "type": "image", + "image_path": "e0ef05e879f2cabc6cc375cd5dfc6c14399c3e8c637b35a2bdd2dcd0382c64de.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 416, + 236, + 523, + 336 + ], + "blocks": [ + { + "bbox": [ + 428, + 226, + 534, + 236 + ], + "lines": [ + { + "bbox": [ + 428, + 226, + 534, + 236 + ], + "spans": [ + { + "bbox": [ + 428, + 226, + 534, + 236 + ], + "type": "text", + "content": "WiSE-Step896 on MATH500" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 416, + 236, + 523, + 336 + ], + "lines": [ + { + "bbox": [ + 416, + 236, + 523, + 336 + ], + "spans": [ + { + "bbox": [ + 416, + 236, + 523, + 336 + ], + "type": "image", + "image_path": "cd383a929ba7e9a9865537886d40e7be10298f21f2a4eee59bb64f1c03895e08.jpg" + } + ] 
+ } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 72, + 350, + 182, + 450 + ], + "blocks": [ + { + "bbox": [ + 82, + 340, + 192, + 350 + ], + "lines": [ + { + "bbox": [ + 82, + 340, + 192, + 350 + ], + "spans": [ + { + "bbox": [ + 82, + 340, + 192, + 350 + ], + "type": "text", + "content": "WiSE-Step1120 on MATH500" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 72, + 350, + 182, + 450 + ], + "lines": [ + { + "bbox": [ + 72, + 350, + 182, + 450 + ], + "spans": [ + { + "bbox": [ + 72, + 350, + 182, + 450 + ], + "type": "image", + "image_path": "3d13f320227abed29d629f8ace04823e9059603f82a24fb7c3c0d25ac51e5eb4.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 460, + 541, + 499 + ], + "lines": [ + { + "bbox": [ + 67, + 460, + 541, + 499 + ], + "spans": [ + { + "bbox": [ + 67, + 460, + 541, + 499 + ], + "type": "text", + "content": "Figure 13: Pass@1 versus Pass@K of WiSEFT of Qwen-2.5-0.5B trained and evaluated on MATH500. 
We interpolate between model " + }, + { + "bbox": [ + 67, + 460, + 541, + 499 + ], + "type": "inline_equation", + "content": "\\pmb{w}_0" + }, + { + "bbox": [ + 67, + 460, + 541, + 499 + ], + "type": "text", + "content": " at Step 112 with " + }, + { + "bbox": [ + 67, + 460, + 541, + 499 + ], + "type": "inline_equation", + "content": "\\pmb{w}_t" + }, + { + "bbox": [ + 67, + 460, + 541, + 499 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 67, + 460, + 541, + 499 + ], + "type": "inline_equation", + "content": "t\\in [672,896,1120]" + }, + { + "bbox": [ + 67, + 460, + 541, + 499 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 67, + 460, + 541, + 499 + ], + "type": "inline_equation", + "content": "\\delta \\pmb{w}_0 + (1 - \\delta)\\pmb{w}_t" + }, + { + "bbox": [ + 67, + 460, + 541, + 499 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 67, + 460, + 541, + 499 + ], + "type": "inline_equation", + "content": "\\delta \\in [0.1,0.9]" + }, + { + "bbox": [ + 67, + 460, + 541, + 499 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 187, + 350, + 298, + 450 + ], + "blocks": [ + { + "bbox": [ + 196, + 340, + 306, + 350 + ], + "lines": [ + { + "bbox": [ + 196, + 340, + 306, + 350 + ], + "spans": [ + { + "bbox": [ + 196, + 340, + 306, + 350 + ], + "type": "text", + "content": "WiSE-Step1120 on MATH500" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 187, + 350, + 298, + 450 + ], + "lines": [ + { + "bbox": [ + 187, + 350, + 298, + 450 + ], + "spans": [ + { + "bbox": [ + 187, + 350, + 298, + 450 + ], + "type": "image", + "image_path": "d05652d4c9f3cc0229b825169a87f0a25576d63a99afa799f8969368cab3b996.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 302, + 350, + 412, + 450 + ], + "blocks": [ + { + "bbox": [ + 312, + 340, + 422, + 350 + ], + "lines": [ + { + "bbox": [ + 312, + 340, + 422, + 350 + ], + "spans": [ + { + "bbox": [ + 312, + 340, + 422, + 350 + ], + "type": "text", + "content": "WiSE-Step1120 on MATH500" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 302, + 350, + 412, + 450 + ], + "lines": [ + { + "bbox": [ + 302, + 350, + 412, + 450 + ], + "spans": [ + { + "bbox": [ + 302, + 350, + 412, + 450 + ], + "type": "image", + "image_path": "a79e505ca270a42946351e05193fde44a40c51342de249abe1125406caef19ef.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 416, + 350, + 526, + 450 + ], + "blocks": [ + { + "bbox": [ + 425, + 340, + 537, + 350 + ], + "lines": [ + { + "bbox": [ + 425, + 340, + 537, + 350 + ], + "spans": [ + { + "bbox": [ + 425, + 340, + 537, + 350 + ], + "type": "text", + "content": "WiSE-Step1120 on MATH500" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" 
+ }, + { + "bbox": [ + 416, + 350, + 526, + 450 + ], + "lines": [ + { + "bbox": [ + 416, + 350, + 526, + 450 + ], + "spans": [ + { + "bbox": [ + 416, + 350, + 526, + 450 + ], + "type": "image", + "image_path": "4970e2bbeb01a58d65034f7eee2d971620f82a124a24f05a15a0868fd8060784.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 746, + 310, + 755 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 746, + 310, + 755 + ], + "spans": [ + { + "bbox": [ + 299, + 746, + 310, + 755 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 71, + 88, + 192, + 201 + ], + "blocks": [ + { + "bbox": [ + 71, + 88, + 192, + 201 + ], + "lines": [ + { + "bbox": [ + 71, + 88, + 192, + 201 + ], + "spans": [ + { + "bbox": [ + 71, + 88, + 192, + 201 + ], + "type": "image", + "image_path": "6415bf26abc61b3f3704f1e93f285ad0c1d640bbb79a1978e63f250e5f01c217.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 194, + 88, + 306, + 201 + ], + "blocks": [ + { + "bbox": [ + 194, + 88, + 306, + 201 + ], + "lines": [ + { + "bbox": [ + 194, + 88, + 306, + 201 + ], + "spans": [ + { + "bbox": [ + 194, + 88, + 306, + 201 + ], + "type": "image", + "image_path": "cee9d9f66f1ba05e20958f6280cd378d5414554e40cbe63513148f4d200be612.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + 
"bbox": [ + 306, + 88, + 420, + 201 + ], + "blocks": [ + { + "bbox": [ + 306, + 88, + 420, + 201 + ], + "lines": [ + { + "bbox": [ + 306, + 88, + 420, + 201 + ], + "spans": [ + { + "bbox": [ + 306, + 88, + 420, + 201 + ], + "type": "image", + "image_path": "7e11ff93e0b785d9953e3d89bcd12390815907a6dc2f3d35716009e51d01b2a9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 417, + 88, + 534, + 201 + ], + "blocks": [ + { + "bbox": [ + 417, + 88, + 534, + 201 + ], + "lines": [ + { + "bbox": [ + 417, + 88, + 534, + 201 + ], + "spans": [ + { + "bbox": [ + 417, + 88, + 534, + 201 + ], + "type": "image", + "image_path": "e2a337f3f3b316e21bd15dad207ede3c32034de3208f9ee58d3e8e8a316f2a94.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 75, + 204, + 187, + 316 + ], + "blocks": [ + { + "bbox": [ + 75, + 204, + 187, + 316 + ], + "lines": [ + { + "bbox": [ + 75, + 204, + 187, + 316 + ], + "spans": [ + { + "bbox": [ + 75, + 204, + 187, + 316 + ], + "type": "image", + "image_path": "c5f007038e4a8539395358cfc078786e75a2500ccebd39ea8dcc0d9f3edd38c9.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 190, + 205, + 301, + 316 + ], + "blocks": [ + { + "bbox": [ + 190, + 205, + 301, + 316 + ], + "lines": [ + { + "bbox": [ + 190, + 205, + 301, + 316 + ], + "spans": [ + { + "bbox": [ + 190, + 205, + 301, + 316 + ], + "type": "image", + "image_path": "8ab320b8b9b148e7007304b25bcea6941eee571cdcfb9f2bcb270cc915f41d2e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 306, + 205, + 416, + 316 + ], + "blocks": [ + { + "bbox": [ + 306, + 205, + 416, + 316 + ], + "lines": [ + { + "bbox": [ + 306, + 205, + 416, + 316 + ], + "spans": [ + { + "bbox": [ + 306, + 
205, + 416, + 316 + ], + "type": "image", + "image_path": "534a1915e7237bbe8eb59cd3e2becaa163ccc90f8b40a09cb87b02ff2f834f83.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 420, + 205, + 531, + 316 + ], + "blocks": [ + { + "bbox": [ + 420, + 205, + 531, + 316 + ], + "lines": [ + { + "bbox": [ + 420, + 205, + 531, + 316 + ], + "spans": [ + { + "bbox": [ + 420, + 205, + 531, + 316 + ], + "type": "image", + "image_path": "88a2aea4db6f0ceb07a3f515c45521b42aef420591197fc4ac31c9e750a6eb8c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 76, + 320, + 183, + 433 + ], + "blocks": [ + { + "bbox": [ + 76, + 320, + 183, + 433 + ], + "lines": [ + { + "bbox": [ + 76, + 320, + 183, + 433 + ], + "spans": [ + { + "bbox": [ + 76, + 320, + 183, + 433 + ], + "type": "image", + "image_path": "d98f4a0d11c754910a2b645db67fdc99936a484cb60dcd83a0344a2437fc161b.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 190, + 320, + 298, + 433 + ], + "blocks": [ + { + "bbox": [ + 190, + 320, + 298, + 433 + ], + "lines": [ + { + "bbox": [ + 190, + 320, + 298, + 433 + ], + "spans": [ + { + "bbox": [ + 190, + 320, + 298, + 433 + ], + "type": "image", + "image_path": "5186f02cf14107fb5f45cd06a48b8a84b8d148a57fc1f02fb24e57f2de6f0b2c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 306, + 320, + 421, + 433 + ], + "blocks": [ + { + "bbox": [ + 306, + 320, + 421, + 433 + ], + "lines": [ + { + "bbox": [ + 306, + 320, + 421, + 433 + ], + "spans": [ + { + "bbox": [ + 306, + 320, + 421, + 433 + ], + "type": "image", + "image_path": "6879e45cb82d6e4e3cb71ac588c9076b39549c4f56feda977179787fe237fef8.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 420, + 320, + 537, + 433 + ], + "blocks": [ + { + "bbox": [ + 420, + 320, + 537, + 433 + ], + "lines": [ + { + "bbox": [ + 420, + 320, + 537, + 433 + ], + "spans": [ + { + "bbox": [ + 420, + 320, + 537, + 433 + ], + "type": "image", + "image_path": "7c463a6b643c401de38db5cb6513d86e528960fd75d924839fc2a365a4fe7d82.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 76, + 437, + 183, + 548 + ], + "blocks": [ + { + "bbox": [ + 76, + 437, + 183, + 548 + ], + "lines": [ + { + "bbox": [ + 76, + 437, + 183, + 548 + ], + "spans": [ + { + "bbox": [ + 76, + 437, + 183, + 548 + ], + "type": "image", + "image_path": "210f985ab8344ab9024b0e8866757143d08c6a402d3b38801bfe06bad03d0471.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 190, + 437, + 298, + 548 + ], + "blocks": [ + { + "bbox": [ + 190, + 437, + 298, + 548 + ], + "lines": [ + { + "bbox": [ + 190, + 437, + 298, + 548 + ], + "spans": [ + { + "bbox": [ + 190, + 437, + 298, + 548 + ], + "type": "image", + "image_path": "97b56b7e63b48c137ae6e876983d6e3b63f26d408263648812f7955049209453.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 306, + 437, + 413, + 548 + ], + "blocks": [ + { + "bbox": [ + 306, + 437, + 413, + 548 + ], + "lines": [ + { + "bbox": [ + 306, + 437, + 413, + 548 + ], + "spans": [ + { + "bbox": [ + 306, + 437, + 413, + 548 + ], + "type": "image", + "image_path": "6583cb61c58420a22e2b481d3ecd6ab5badcb2086aeae20b3945609becb32131.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 420, + 437, + 537, + 548 + ], + "blocks": [ + { + "bbox": [ + 420, + 437, + 537, + 548 + ], + "lines": [ + { 
+ "bbox": [ + 420, + 437, + 537, + 548 + ], + "spans": [ + { + "bbox": [ + 420, + 437, + 537, + 548 + ], + "type": "image", + "image_path": "76cd3796b40caac6ff8a02aec6cb5d704946a539355c961c1da0cde04fcc0f3b.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 76, + 552, + 183, + 665 + ], + "blocks": [ + { + "bbox": [ + 76, + 552, + 183, + 665 + ], + "lines": [ + { + "bbox": [ + 76, + 552, + 183, + 665 + ], + "spans": [ + { + "bbox": [ + 76, + 552, + 183, + 665 + ], + "type": "image", + "image_path": "9b0b5f9803b94a0b5f4b2b333482b5acab14a7b4396c642f98d4c63f944bbd16.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 675, + 541, + 715 + ], + "lines": [ + { + "bbox": [ + 67, + 675, + 541, + 715 + ], + "spans": [ + { + "bbox": [ + 67, + 675, + 541, + 715 + ], + "type": "text", + "content": "Figure 14: Pass@1 versus Pass@K of WiSEFT of Gemma-2-2B trained and evaluated on GSM8K. 
We interpolate between model " + }, + { + "bbox": [ + 67, + 675, + 541, + 715 + ], + "type": "inline_equation", + "content": "\\pmb{w}_0" + }, + { + "bbox": [ + 67, + 675, + 541, + 715 + ], + "type": "text", + "content": " at Step 171 with " + }, + { + "bbox": [ + 67, + 675, + 541, + 715 + ], + "type": "inline_equation", + "content": "\\pmb{w}_t" + }, + { + "bbox": [ + 67, + 675, + 541, + 715 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 67, + 675, + 541, + 715 + ], + "type": "inline_equation", + "content": "t \\in [342, 684, 1026, 1368, 1710]" + }, + { + "bbox": [ + 67, + 675, + 541, + 715 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 67, + 675, + 541, + 715 + ], + "type": "inline_equation", + "content": "\\delta \\pmb{w}_0 + (1 - \\delta) \\pmb{w}_t" + }, + { + "bbox": [ + 67, + 675, + 541, + 715 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 67, + 675, + 541, + 715 + ], + "type": "inline_equation", + "content": "\\delta \\in [0.05, 0.9]" + }, + { + "bbox": [ + 67, + 675, + 541, + 715 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 190, + 552, + 298, + 665 + ], + "blocks": [ + { + "bbox": [ + 190, + 552, + 298, + 665 + ], + "lines": [ + { + "bbox": [ + 190, + 552, + 298, + 665 + ], + "spans": [ + { + "bbox": [ + 190, + 552, + 298, + 665 + ], + "type": "image", + "image_path": "4c43efb60dc7fd63fa7e0bf6fe8153abec532160cb2af6bb071e080da392c5f2.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 306, + 552, + 413, + 665 + ], + "blocks": [ + { + "bbox": [ + 306, + 552, + 413, + 665 + ], + "lines": [ + { + "bbox": [ + 306, + 552, + 413, + 665 + ], + "spans": [ + { + "bbox": [ + 306, + 552, + 413, + 665 + ], + "type": "image", + "image_path": "ad7003e3f0b25d46193c9fffc98906a772a124775a56cd8b98a55940824b50a2.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 420, + 552, + 537, + 665 + ], + "blocks": [ + { + "bbox": [ + 420, + 552, + 537, + 665 + ], + "lines": [ + { + "bbox": [ + 420, + 552, + 537, + 665 + ], + "spans": [ + { + "bbox": [ + 420, + 552, + 537, + 665 + ], + "type": "image", + "image_path": "b1061168966769f0a706a6cdefd0ca7df762aaf6f5cf2e53465bfdddcf6d261d.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "spans": [ + { + "bbox": [ + 299, + 746, + 311, + 
756 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 76, + 290, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 76, + 290, + 92 + ], + "spans": [ + { + "bbox": [ + 69, + 76, + 290, + 92 + ], + "type": "text", + "content": "F Measuring Diversity of Traces" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 104, + 541, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 104, + 541, + 129 + ], + "spans": [ + { + "bbox": [ + 67, + 104, + 541, + 129 + ], + "type": "text", + "content": "We measure the diversity of the 100 sampled traces of Gemma-2-2B across GSM8k test. We measure diversity in terms of 3 different measures." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 139, + 541, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 139, + 541, + 164 + ], + "spans": [ + { + "bbox": [ + 68, + 139, + 541, + 164 + ], + "type": "text", + "content": "Output Diversity The cardinality or number of unique answers in the set of all model outputs " + }, + { + "bbox": [ + 68, + 139, + 541, + 164 + ], + "type": "inline_equation", + "content": "\\left|\\{\\hat{y}_1,\\hat{y}_2,\\dots ,\\hat{y}_n\\}\\right|" + }, + { + "bbox": [ + 68, + 139, + 541, + 164 + ], + "type": "text", + "content": " over the total number of traces." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 167, + 542, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 167, + 542, + 216 + ], + "spans": [ + { + "bbox": [ + 69, + 167, + 542, + 216 + ], + "type": "text", + "content": "Operation Diversity In GSM8k, each intermediate step consists of basic arithmetic operations, e.g. 
" + }, + { + "bbox": [ + 69, + 167, + 542, + 216 + ], + "type": "inline_equation", + "content": "5 + 3 = 8" + }, + { + "bbox": [ + 69, + 167, + 542, + 216 + ], + "type": "text", + "content": ". We may simply map each of the traces to the sequence of arithmetic operations the model steps through, i.e. " + }, + { + "bbox": [ + 69, + 167, + 542, + 216 + ], + "type": "inline_equation", + "content": "r_i \\rightarrow [o_1, o_2, \\ldots, o_t]" + }, + { + "bbox": [ + 69, + 167, + 542, + 216 + ], + "type": "text", + "content": ". This mapping is extracted by code. Then, given this set, we measure unique sequence of operations over the number of total traces." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 218, + 542, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 218, + 542, + 244 + ], + "spans": [ + { + "bbox": [ + 68, + 218, + 542, + 244 + ], + "type": "text", + "content": "Semantic Diversity We measure the similarity of trace using cosine similarities between the text-embeddings (Bilmes, 2022; Yu et al., 2023)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 255, + 294, + 269 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 255, + 294, + 269 + ], + "spans": [ + { + "bbox": [ + 69, + 255, + 294, + 269 + ], + "type": "text", + "content": "F.1 Does temperature increase diversity?" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 277, + 508, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 277, + 508, + 291 + ], + "spans": [ + { + "bbox": [ + 68, + 277, + 508, + 291 + ], + "type": "text", + "content": "Temperature does increase diversity, but it also increases the chances of sampling outlier answers." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "spans": [ + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 188, + 200, + 304 + ], + "blocks": [ + { + "bbox": [ + 78, + 188, + 200, + 304 + ], + "lines": [ + { + "bbox": [ + 78, + 188, + 200, + 304 + ], + "spans": [ + { + "bbox": [ + 78, + 188, + 200, + 304 + ], + "type": "image", + "image_path": "e5ccfdef5708eae2cdd50fb2c0053f33997475088945e324b1799671240c70ec.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 207, + 188, + 331, + 304 + ], + "blocks": [ + { + "bbox": [ + 207, + 188, + 331, + 304 + ], + "lines": [ + { + "bbox": [ + 207, + 188, + 331, + 304 + ], + "spans": [ + { + "bbox": [ + 207, + 188, + 331, + 304 + ], + "type": "image", + "image_path": "6505c814dcb35aa9757d8be7050bd89319b4a0aae6db8d965dfe1bf81985e105.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 239, + 312, + 370, + 323 + ], + "lines": [ + { + "bbox": [ + 239, + 312, + 370, + 323 + ], + "spans": [ + { + "bbox": [ + 239, + 312, + 370, + 323 + ], + "type": "text", + "content": "Diversity Across SFT [T=1.0]" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 336, + 188, + 459, 
+ 304 + ], + "blocks": [ + { + "bbox": [ + 239, + 167, + 370, + 178 + ], + "lines": [ + { + "bbox": [ + 239, + 167, + 370, + 178 + ], + "spans": [ + { + "bbox": [ + 239, + 167, + 370, + 178 + ], + "type": "text", + "content": "Diversity Across SFT [T=0.8]" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 336, + 188, + 459, + 304 + ], + "lines": [ + { + "bbox": [ + 336, + 188, + 459, + 304 + ], + "spans": [ + { + "bbox": [ + 336, + 188, + 459, + 304 + ], + "type": "image", + "image_path": "9edcca763df460a84257bc718bc153f2f8f5a4c6a79bbf78ad4b859738b6d86e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 463, + 180, + 529, + 293 + ], + "blocks": [ + { + "bbox": [ + 463, + 180, + 529, + 293 + ], + "lines": [ + { + "bbox": [ + 463, + 180, + 529, + 293 + ], + "spans": [ + { + "bbox": [ + 463, + 180, + 529, + 293 + ], + "type": "image", + "image_path": "6357c2ac829597a5cabbd3f6f2c13751aeaac97b2934f7e1144f19fda33b7246.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 78, + 334, + 200, + 448 + ], + "blocks": [ + { + "bbox": [ + 78, + 334, + 200, + 448 + ], + "lines": [ + { + "bbox": [ + 78, + 334, + 200, + 448 + ], + "spans": [ + { + "bbox": [ + 78, + 334, + 200, + 448 + ], + "type": "image", + "image_path": "7ad9bd5e57c1c23dbd6a2fe73d568d72272de9cf3ff807b8a8da06b7e3ec8421.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 208, + 334, + 332, + 448 + ], + "blocks": [ + { + "bbox": [ + 208, + 334, + 332, + 448 + ], + "lines": [ + { + "bbox": [ + 208, + 334, + 332, + 448 + ], + "spans": [ + { + "bbox": [ + 208, + 334, + 332, + 448 + ], + "type": "image", + "image_path": "c03ad321d119e163d7d652bd75fdd1be669582b2e192edd3f0c824e236fa1553.jpg" + } + ] + } + ], + "index": 8, + 
"angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 240, + 457, + 370, + 468 + ], + "lines": [ + { + "bbox": [ + 240, + 457, + 370, + 468 + ], + "spans": [ + { + "bbox": [ + 240, + 457, + 370, + 468 + ], + "type": "text", + "content": "Diversity Across SFT [T=1.5]" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 337, + 334, + 459, + 448 + ], + "blocks": [ + { + "bbox": [ + 337, + 334, + 459, + 448 + ], + "lines": [ + { + "bbox": [ + 337, + 334, + 459, + 448 + ], + "spans": [ + { + "bbox": [ + 337, + 334, + 459, + 448 + ], + "type": "image", + "image_path": "946d993abc5cb2f9296711897d897572593e58f08517be74488fe99906faa457.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 463, + 326, + 529, + 438 + ], + "blocks": [ + { + "bbox": [ + 463, + 326, + 529, + 438 + ], + "lines": [ + { + "bbox": [ + 463, + 326, + 529, + 438 + ], + "spans": [ + { + "bbox": [ + 463, + 326, + 529, + 438 + ], + "type": "image", + "image_path": "4cca06e5c218704714c26e86a6ac995e438bcee795019154e74edbfe68091d0f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 78, + 479, + 200, + 594 + ], + "blocks": [ + { + "bbox": [ + 78, + 479, + 200, + 594 + ], + "lines": [ + { + "bbox": [ + 78, + 479, + 200, + 594 + ], + "spans": [ + { + "bbox": [ + 78, + 479, + 200, + 594 + ], + "type": "image", + "image_path": "6c18e77dd874f11856943ca580d37e31d9687c5a7be4bc53d7a94208e6f4079a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 609, + 541, + 633 + ], + "lines": [ + { + "bbox": [ + 68, + 609, + 541, + 633 + ], + "spans": [ + { + "bbox": [ + 68, + 609, + 541, + 633 + ], + "type": "text", + "content": "Figure 15: Diversity of traces sampled with Temperature " + }, + { + "bbox": [ + 68, + 609, + 
541, + 633 + ], + "type": "inline_equation", + "content": "\\in" + }, + { + "bbox": [ + 68, + 609, + 541, + 633 + ], + "type": "text", + "content": " {0.8, 1.0, 1.5} for Gemma-2-2B SFT checkpoints on GSM8k" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 208, + 479, + 331, + 594 + ], + "blocks": [ + { + "bbox": [ + 208, + 479, + 331, + 594 + ], + "lines": [ + { + "bbox": [ + 208, + 479, + 331, + 594 + ], + "spans": [ + { + "bbox": [ + 208, + 479, + 331, + 594 + ], + "type": "image", + "image_path": "445fd20e4c1373f0114643f0a40149f387735f7d61bc9ab734ce91f06f149ec6.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 337, + 473, + 459, + 594 + ], + "blocks": [ + { + "bbox": [ + 337, + 473, + 459, + 594 + ], + "lines": [ + { + "bbox": [ + 337, + 473, + 459, + 594 + ], + "spans": [ + { + "bbox": [ + 337, + 473, + 459, + 594 + ], + "type": "image", + "image_path": "c20602e164d649b2cede3ffcbe6d6d45d34e0fbfdb8701a97c0a3495fe22a13a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 463, + 472, + 529, + 583 + ], + "blocks": [ + { + "bbox": [ + 463, + 472, + 529, + 583 + ], + "lines": [ + { + "bbox": [ + 463, + 472, + 529, + 583 + ], + "spans": [ + { + "bbox": [ + 463, + 472, + 529, + 583 + ], + "type": "image", + "image_path": "04a8123274c3fe4a7a0f718441ab029fcec04c65de463ce4d5f4f8ddb1113e96.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 
+ }, + { + "bbox": [ + 299, + 746, + 311, + 755 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 746, + 311, + 755 + ], + "spans": [ + { + "bbox": [ + 299, + 746, + 311, + 755 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 76, + 541, + 103 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 76, + 541, + 103 + ], + "spans": [ + { + "bbox": [ + 68, + 76, + 541, + 103 + ], + "type": "text", + "content": "F.2 How well do token-level diverse decoding strategies compare with optimal strategy with oracle?" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 113, + 542, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 113, + 542, + 163 + ], + "spans": [ + { + "bbox": [ + 67, + 113, + 542, + 163 + ], + "type": "text", + "content": "Hyperparameter Tuning Details We grid search for optimal temperature for all baselines over " + }, + { + "bbox": [ + 67, + 113, + 542, + 163 + ], + "type": "inline_equation", + "content": "T = [0.8, 1.0, 1.2, 1.5, 1.8]" + }, + { + "bbox": [ + 67, + 113, + 542, + 163 + ], + "type": "text", + "content": ". For nucleus, we choose the best cutoff threshold between " + }, + { + "bbox": [ + 67, + 113, + 542, + 163 + ], + "type": "inline_equation", + "content": "[0.8, 0.9, 0.95]" + }, + { + "bbox": [ + 67, + 113, + 542, + 163 + ], + "type": "text", + "content": ". For min-p, we choose the best probability threshold between " + }, + { + "bbox": [ + 67, + 113, + 542, + 163 + ], + "type": "inline_equation", + "content": "[0.01, 0.05, 0.1]" + }, + { + "bbox": [ + 67, + 113, + 542, + 163 + ], + "type": "text", + "content": ". 
For tokenwise top-k, we choose best k between " + }, + { + "bbox": [ + 67, + 113, + 542, + 163 + ], + "type": "inline_equation", + "content": "[12, 25, 50]" + }, + { + "bbox": [ + 67, + 113, + 542, + 163 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 178, + 171, + 433, + 258 + ], + "blocks": [ + { + "bbox": [ + 178, + 171, + 433, + 258 + ], + "lines": [ + { + "bbox": [ + 178, + 171, + 433, + 258 + ], + "spans": [ + { + "bbox": [ + 178, + 171, + 433, + 258 + ], + "type": "table", + "html": "
Decoding StrategyPass@2Pass@4Pass@8
Naive0.5650.6660.760
Nucleus0.5660.6680.757
Min-p0.5660.6680.760
Top-k0.5630.6660.756
Top-k w/Oracle0.7600.8320.901
", + "image_path": "c826bd5a0ec5354340e0780dded03d7ef2d94af5b3717c2da25e8b37e585943e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 178, + 300, + 433, + 386 + ], + "blocks": [ + { + "bbox": [ + 111, + 264, + 499, + 278 + ], + "lines": [ + { + "bbox": [ + 111, + 264, + 499, + 278 + ], + "spans": [ + { + "bbox": [ + 111, + 264, + 499, + 278 + ], + "type": "text", + "content": "Table 2: Best Pass@k of Sampling Strategies for Qwen-2.5-0.5B over SFT checkpoints" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 178, + 300, + 433, + 386 + ], + "lines": [ + { + "bbox": [ + 178, + 300, + 433, + 386 + ], + "spans": [ + { + "bbox": [ + 178, + 300, + 433, + 386 + ], + "type": "table", + "html": "
Decoding StrategyPass@2Pass@4Pass@8
Naive0.5470.6480.737
Nucleus0.5280.6170.694
Min-p0.5500.6550.744
Top-k0.5380.6460.738
Top-k w/Oracle0.7300.8140.878
", + "image_path": "68f30ce20b9b540bd97d34e0e8afd1dd50e712a9365bf753e3341f92825fd290.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 118, + 393, + 492, + 407 + ], + "lines": [ + { + "bbox": [ + 118, + 393, + 492, + 407 + ], + "spans": [ + { + "bbox": [ + 118, + 393, + 492, + 407 + ], + "type": "text", + "content": "Table 3: Pass@k of Sampling Strategies for Qwen-2.5-0.5B at Last SFT Checkpoint" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 72, + 426, + 537, + 552 + ], + "blocks": [ + { + "bbox": [ + 72, + 426, + 537, + 552 + ], + "lines": [ + { + "bbox": [ + 72, + 426, + 537, + 552 + ], + "spans": [ + { + "bbox": [ + 72, + 426, + 537, + 552 + ], + "type": "image", + "image_path": "c6ea5a13b67d1c489b5482fd1dc7e9c590db2bda35acfbb90c820e3cee9fbbba.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 562, + 542, + 624 + ], + "lines": [ + { + "bbox": [ + 67, + 562, + 542, + 624 + ], + "spans": [ + { + "bbox": [ + 67, + 562, + 542, + 624 + ], + "type": "text", + "content": "Figure 16: Pass@K over different Min-P thresholds " + }, + { + "bbox": [ + 67, + 562, + 542, + 624 + ], + "type": "inline_equation", + "content": "\\gamma \\in [0,0.3]" + }, + { + "bbox": [ + 67, + 562, + 542, + 624 + ], + "type": "text", + "content": " and temperatures " + }, + { + "bbox": [ + 67, + 562, + 542, + 624 + ], + "type": "inline_equation", + "content": "T\\in [1,1.6]" + }, + { + "bbox": [ + 67, + 562, + 542, + 624 + ], + "type": "text", + "content": " for Gemma2-2B finetuned on GSM8K. 
Generally, no min-p threshold paired with high temperature " + }, + { + "bbox": [ + 67, + 562, + 542, + 624 + ], + "type": "inline_equation", + "content": "\\mathrm{T} = 1.6" + }, + { + "bbox": [ + 67, + 562, + 542, + 624 + ], + "type": "text", + "content": " (in light green) is able to surpass the Pass@1 of " + }, + { + "bbox": [ + 67, + 562, + 542, + 624 + ], + "type": "inline_equation", + "content": "\\mathrm{T} = 1" + }, + { + "bbox": [ + 67, + 562, + 542, + 624 + ], + "type": "text", + "content": " with best min-p threshold (in orange). In other words, unlike WiSE-FT which increases both Pass@1 and Pass@K, Pass@1 tends to still decrease for the diverse decoding strategy of applying min-p with high temperature." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 746, + 310, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 746, + 310, + 756 + ], + "spans": [ + { + "bbox": [ + 299, + 746, + 310, + 756 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 76, + 187, + 188 + ], + "blocks": [ + { + "bbox": [ + 72, + 76, + 187, + 188 + ], + "lines": [ + { + "bbox": [ + 72, + 76, + 187, + 188 + ], + "spans": [ + { + "bbox": [ + 72, + 76, + 187, + 188 + ], + "type": "image", + "image_path": "a63859f2d49730f3fc594d38e64daee690ae73ff3dfdfd4802371d22024209fe.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + 
"bbox": [ + 189, + 76, + 304, + 187 + ], + "blocks": [ + { + "bbox": [ + 189, + 76, + 304, + 187 + ], + "lines": [ + { + "bbox": [ + 189, + 76, + 304, + 187 + ], + "spans": [ + { + "bbox": [ + 189, + 76, + 304, + 187 + ], + "type": "image", + "image_path": "e2ab7f724e76325f770f6dd199c9afc3e5801a548582d0e9ac5362497292c00e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 310, + 76, + 422, + 187 + ], + "blocks": [ + { + "bbox": [ + 310, + 76, + 422, + 187 + ], + "lines": [ + { + "bbox": [ + 310, + 76, + 422, + 187 + ], + "spans": [ + { + "bbox": [ + 310, + 76, + 422, + 187 + ], + "type": "image", + "image_path": "528a40964d3bfda6c2aef41d322a1d87b86f248ba95412cafdaaf3724d5c8979.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 428, + 76, + 539, + 187 + ], + "blocks": [ + { + "bbox": [ + 428, + 76, + 539, + 187 + ], + "lines": [ + { + "bbox": [ + 428, + 76, + 539, + 187 + ], + "spans": [ + { + "bbox": [ + 428, + 76, + 539, + 187 + ], + "type": "image", + "image_path": "510bcdc6999edfdaf5ad51d42c4d837bb3ee069afcc24f8a26bd6ee5bc71c4e7.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 214, + 194, + 453, + 208 + ], + "blocks": [ + { + "bbox": [ + 214, + 194, + 453, + 208 + ], + "lines": [ + { + "bbox": [ + 214, + 194, + 453, + 208 + ], + "spans": [ + { + "bbox": [ + 214, + 194, + 453, + 208 + ], + "type": "image", + "image_path": "39f305f282f3aa2b8d837c2869699fbd2bb84a64fa117b53ed57a515d2e954a8.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 127, + 217, + 482, + 232 + ], + "lines": [ + { + "bbox": [ + 127, + 217, + 482, + 232 + ], + "spans": [ + { + "bbox": [ + 127, + 217, + 482, + 232 + ], + "type": "text", + "content": "Figure 17: Pass@k of Gemma-2-2B GSM8k Naive 
Sampling with Replacement" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 72, + 243, + 190, + 357 + ], + "blocks": [ + { + "bbox": [ + 72, + 243, + 190, + 357 + ], + "lines": [ + { + "bbox": [ + 72, + 243, + 190, + 357 + ], + "spans": [ + { + "bbox": [ + 72, + 243, + 190, + 357 + ], + "type": "image", + "image_path": "393c4e789004c9b1d25cabc33e6caa251b9a30ac8408881cef3e9408d407b11f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 153, + 389, + 457, + 403 + ], + "lines": [ + { + "bbox": [ + 153, + 389, + 457, + 403 + ], + "spans": [ + { + "bbox": [ + 153, + 389, + 457, + 403 + ], + "type": "text", + "content": "Figure 18: Pass@k of Gemma-2-2B GSM8k Oracle Top K Sampling" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 192, + 243, + 306, + 357 + ], + "blocks": [ + { + "bbox": [ + 192, + 243, + 306, + 357 + ], + "lines": [ + { + "bbox": [ + 192, + 243, + 306, + 357 + ], + "spans": [ + { + "bbox": [ + 192, + 243, + 306, + 357 + ], + "type": "image", + "image_path": "746a4c2b336ab6d80695d60f5f6112ea89a75779153cb6d50e8f7c5219e462ab.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 309, + 244, + 422, + 357 + ], + "blocks": [ + { + "bbox": [ + 309, + 244, + 422, + 357 + ], + "lines": [ + { + "bbox": [ + 309, + 244, + 422, + 357 + ], + "spans": [ + { + "bbox": [ + 309, + 244, + 422, + 357 + ], + "type": "image", + "image_path": "1eb6b1fd37997ea936e23fe598e2d89e0cb3ea24361199d8c518915d5a76ffb0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 425, + 244, + 539, + 357 + ], + "blocks": [ + { + "bbox": [ + 425, + 244, + 539, + 357 + ], + "lines": [ + { + "bbox": [ + 425, + 244, + 539, + 
357 + ], + "spans": [ + { + "bbox": [ + 425, + 244, + 539, + 357 + ], + "type": "image", + "image_path": "d3bd9e86127b96e78ed2a923eb025ecabea92613c76f789c22579bde4d166df7.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 207, + 365, + 444, + 378 + ], + "blocks": [ + { + "bbox": [ + 207, + 365, + 444, + 378 + ], + "lines": [ + { + "bbox": [ + 207, + 365, + 444, + 378 + ], + "spans": [ + { + "bbox": [ + 207, + 365, + 444, + 378 + ], + "type": "image", + "image_path": "a3080dc079b0901c51c02399a1907b2e2c96b0a922a54f5a446ed2ca4860e645.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 72, + 422, + 187, + 542 + ], + "blocks": [ + { + "bbox": [ + 72, + 422, + 187, + 542 + ], + "lines": [ + { + "bbox": [ + 72, + 422, + 187, + 542 + ], + "spans": [ + { + "bbox": [ + 72, + 422, + 187, + 542 + ], + "type": "image", + "image_path": "07caf13e3b113102c5e105ffade1b5592d6b69e20a12f6d7be497607fbc435bf.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 189, + 422, + 304, + 541 + ], + "blocks": [ + { + "bbox": [ + 189, + 422, + 304, + 541 + ], + "lines": [ + { + "bbox": [ + 189, + 422, + 304, + 541 + ], + "spans": [ + { + "bbox": [ + 189, + 422, + 304, + 541 + ], + "type": "image", + "image_path": "fc1e852587bf0d908433faf87683b609d90596c02a702dd55504a960a15c609c.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 304, + 422, + 422, + 541 + ], + "blocks": [ + { + "bbox": [ + 304, + 422, + 422, + 541 + ], + "lines": [ + { + "bbox": [ + 304, + 422, + 422, + 541 + ], + "spans": [ + { + "bbox": [ + 304, + 422, + 422, + 541 + ], + "type": "image", + "image_path": "9e6d23e6964203140d9a54ab2c01c778ff9fc0a06702b556276de27f651dd276.jpg" + } + ] + } 
+ ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 422, + 422, + 539, + 541 + ], + "blocks": [ + { + "bbox": [ + 422, + 422, + 539, + 541 + ], + "lines": [ + { + "bbox": [ + 422, + 422, + 539, + 541 + ], + "spans": [ + { + "bbox": [ + 422, + 422, + 539, + 541 + ], + "type": "image", + "image_path": "010ca52fb4d6d2f4a74f158e3d4b742d0d60ff44e3344acc665ea390dcd0d87d.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 194, + 552, + 433, + 564 + ], + "blocks": [ + { + "bbox": [ + 194, + 552, + 433, + 564 + ], + "lines": [ + { + "bbox": [ + 194, + 552, + 433, + 564 + ], + "spans": [ + { + "bbox": [ + 194, + 552, + 433, + 564 + ], + "type": "image", + "image_path": "34da879b88c056d54890866c39e610bb90307c00176ff655d5e91b80505a2801.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 123, + 575, + 486, + 590 + ], + "lines": [ + { + "bbox": [ + 123, + 575, + 486, + 590 + ], + "spans": [ + { + "bbox": [ + 123, + 575, + 486, + 590 + ], + "type": "text", + "content": "Figure 19: Pass@k of Qwen-2.5-0.5B GSM8k Naive Sampling with Replacement" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "bbox": [ + 68, + 607, + 361, + 621 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 607, + 361, + 621 + ], + "spans": [ + { + "bbox": [ + 68, + 607, + 361, + 621 + ], + "type": "text", + "content": "F.3 Diversity Comparison Between SFT and WiSE-FT" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, 
+ { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "spans": [ + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 121, + 190, + 240 + ], + "blocks": [ + { + "bbox": [ + 72, + 121, + 190, + 240 + ], + "lines": [ + { + "bbox": [ + 72, + 121, + 190, + 240 + ], + "spans": [ + { + "bbox": [ + 72, + 121, + 190, + 240 + ], + "type": "image", + "image_path": "056234a694a700f1c7f01a6e9ed20094d7cd0fdb69eb5de2b5a653d849348ef9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 192, + 121, + 306, + 240 + ], + "blocks": [ + { + "bbox": [ + 192, + 121, + 306, + 240 + ], + "lines": [ + { + "bbox": [ + 192, + 121, + 306, + 240 + ], + "spans": [ + { + "bbox": [ + 192, + 121, + 306, + 240 + ], + "type": "image", + "image_path": "8a3d27f0b455df3c98d77c12e7837d76fbf9b29395a7db957519d3cc75142e50.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 306, + 121, + 423, + 240 + ], + "blocks": [ + { + "bbox": [ + 306, + 121, + 423, + 240 + ], + "lines": [ + { + "bbox": [ + 306, + 121, + 423, + 240 + ], + "spans": [ + { + "bbox": [ + 306, + 121, + 423, + 240 + ], + "type": "image", + "image_path": "db799a546fac2266a4fb6e884a368cf79f696cc523c048ac9f7712be215438ae.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 423, + 122, + 539, + 240 + ], + "blocks": [ + { + "bbox": [ + 423, + 122, + 539, + 240 + ], + "lines": [ + { + "bbox": [ + 423, + 122, + 539, + 240 + ], + "spans": [ + { + "bbox": [ + 423, + 122, + 539, + 240 + ], + "type": "image", + 
"image_path": "f3af3a04795cd7ec6e134e72ceb0fd20ab57c4f2f3d29acda347037690cecaa8.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 198, + 250, + 434, + 264 + ], + "blocks": [ + { + "bbox": [ + 198, + 250, + 434, + 264 + ], + "lines": [ + { + "bbox": [ + 198, + 250, + 434, + 264 + ], + "spans": [ + { + "bbox": [ + 198, + 250, + 434, + 264 + ], + "type": "image", + "image_path": "335ad53fb8c267ab9f4d31675ae0ac9c056c01642d53698f0a201571736eb81f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 274, + 462, + 289 + ], + "lines": [ + { + "bbox": [ + 149, + 274, + 462, + 289 + ], + "spans": [ + { + "bbox": [ + 149, + 274, + 462, + 289 + ], + "type": "text", + "content": "Figure 20: Pass@k of Qwen-2.5-0.5B GSM8k Oracle Top K Sampling" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 110, + 387, + 499, + 504 + ], + "blocks": [ + { + "bbox": [ + 110, + 387, + 499, + 504 + ], + "lines": [ + { + "bbox": [ + 110, + 387, + 499, + 504 + ], + "spans": [ + { + "bbox": [ + 110, + 387, + 499, + 504 + ], + "type": "image", + "image_path": "ccfab5cfd7bae8ef782e079cda3aeda4adbe6f786122fc77799b75ec40133ee2.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 111, + 510, + 499, + 628 + ], + "blocks": [ + { + "bbox": [ + 111, + 510, + 499, + 628 + ], + "lines": [ + { + "bbox": [ + 111, + 510, + 499, + 628 + ], + "spans": [ + { + "bbox": [ + 111, + 510, + 499, + 628 + ], + "type": "image", + "image_path": "5c78239b846481b7bf9c97f9b381676efebb712f06044215ffc0828a1d3181dd.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 641, + 541, + 677 + ], + "lines": [ + { + "bbox": [ + 67, + 641, + 541, + 677 + ], + "spans": [ + { + "bbox": [ + 67, + 
641, + 541, + 677 + ], + "type": "text", + "content": "Figure 21: Operation, Semantic, and Answer Diversity of Gemma-2-2B checkpoints of SFT over GSM8K versus the corresponding WiSE-FT variants (with the earliest checkpoint). We decode with temperature set to 1.0." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "spans": [ + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 252, + 501, + 495 + ], + "blocks": [ + { + "bbox": [ + 111, + 252, + 501, + 495 + ], + "lines": [ + { + "bbox": [ + 111, + 252, + 501, + 495 + ], + "spans": [ + { + "bbox": [ + 111, + 252, + 501, + 495 + ], + "type": "image", + "image_path": "4546a62db2a2c93d5719df6b62375b8b3d804a1758924ed0422076a662d1a358.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 509, + 541, + 544 + ], + "lines": [ + { + "bbox": [ + 67, + 509, + 541, + 544 + ], + "spans": [ + { + "bbox": [ + 67, + 509, + 541, + 544 + ], + "type": "text", + "content": "Figure 22: Operation, Semantic, and Answer Diversity of Gemma-2-2B checkpoints of SFT over GSM8K versus the corresponding WiSE-FT variants (with the earliest checkpoint). We decode with temperature set to 1.6." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 746, + 311, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 746, + 311, + 756 + ], + "spans": [ + { + "bbox": [ + 300, + 746, + 311, + 756 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 76, + 231, + 90 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 76, + 231, + 90 + ], + "spans": [ + { + "bbox": [ + 69, + 76, + 231, + 90 + ], + "type": "text", + "content": "G Best of K Evaluation" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 70, + 110, + 187, + 192 + ], + "blocks": [ + { + "bbox": [ + 70, + 110, + 187, + 192 + ], + "lines": [ + { + "bbox": [ + 70, + 110, + 187, + 192 + ], + "spans": [ + { + "bbox": [ + 70, + 110, + 187, + 192 + ], + "type": "image", + "image_path": "9e3fdc034bc19b8587b4804417cfbed97b363f7d3658230fe4584772068195a9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 189, + 111, + 304, + 192 + ], + "blocks": [ + { + "bbox": [ + 189, + 111, + 304, + 192 + ], + "lines": [ + { + "bbox": [ + 189, + 111, + 304, + 192 + ], + "spans": [ + { + "bbox": [ + 189, + 111, + 304, + 192 + ], + "type": "image", + "image_path": "b44675a6666bda0b73ef9e200a9b1d1022ee221c3d4530367974a9e78cee2014.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": 
"image", + "bbox": [ + 306, + 111, + 422, + 192 + ], + "blocks": [ + { + "bbox": [ + 306, + 111, + 422, + 192 + ], + "lines": [ + { + "bbox": [ + 306, + 111, + 422, + 192 + ], + "spans": [ + { + "bbox": [ + 306, + 111, + 422, + 192 + ], + "type": "image", + "image_path": "aeb10270cd0a472e542fdfa57315bbbb7b5ba41555291cba501c05e400478d37.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 424, + 111, + 539, + 192 + ], + "blocks": [ + { + "bbox": [ + 424, + 111, + 539, + 192 + ], + "lines": [ + { + "bbox": [ + 424, + 111, + 539, + 192 + ], + "spans": [ + { + "bbox": [ + 424, + 111, + 539, + 192 + ], + "type": "image", + "image_path": "b2d2e4fd5b348100cefd256746110d2df91d3244a48b747cbf5561ad703f3c6e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 233, + 196, + 377, + 213 + ], + "blocks": [ + { + "bbox": [ + 233, + 196, + 377, + 213 + ], + "lines": [ + { + "bbox": [ + 233, + 196, + 377, + 213 + ], + "spans": [ + { + "bbox": [ + 233, + 196, + 377, + 213 + ], + "type": "image", + "image_path": "ec5aabdb8afc02d520db433b2ed880cfb460293ab4f514cf1cd416a0e05b8f01.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 222, + 542, + 248 + ], + "lines": [ + { + "bbox": [ + 68, + 222, + 542, + 248 + ], + "spans": [ + { + "bbox": [ + 68, + 222, + 542, + 248 + ], + "type": "text", + "content": "Figure 23: Best@K performance on MATH500 with ORM verifier, comparing different SFT and WiSE-FT checkpoints of Qwen-2.5-0.5B for " + }, + { + "bbox": [ + 68, + 222, + 542, + 248 + ], + "type": "inline_equation", + "content": "K = 2,4,8,32" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 71, + 266, + 231, + 388 + ], + "blocks": [ + { + "bbox": [ + 71, + 266, + 231, + 388 + ], + "lines": [ + 
{ + "bbox": [ + 71, + 266, + 231, + 388 + ], + "spans": [ + { + "bbox": [ + 71, + 266, + 231, + 388 + ], + "type": "image", + "image_path": "814e37e4a8cfad542f088af46bbd948f458f6686fdea387a041191635dae743d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 233, + 266, + 385, + 388 + ], + "blocks": [ + { + "bbox": [ + 233, + 266, + 385, + 388 + ], + "lines": [ + { + "bbox": [ + 233, + 266, + 385, + 388 + ], + "spans": [ + { + "bbox": [ + 233, + 266, + 385, + 388 + ], + "type": "image", + "image_path": "dcc8297332fec4d4f444504910cf3c945a0afaf6ec9a11be41dfb59a185c4df3.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 388, + 266, + 539, + 388 + ], + "blocks": [ + { + "bbox": [ + 388, + 266, + 539, + 388 + ], + "lines": [ + { + "bbox": [ + 388, + 266, + 539, + 388 + ], + "spans": [ + { + "bbox": [ + 388, + 266, + 539, + 388 + ], + "type": "image", + "image_path": "04d3dcc21b958936bdc9a1f91c07188c307c9efefc4ccbe3518792514baf7514.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 214, + 396, + 394, + 418 + ], + "blocks": [ + { + "bbox": [ + 214, + 396, + 394, + 418 + ], + "lines": [ + { + "bbox": [ + 214, + 396, + 394, + 418 + ], + "spans": [ + { + "bbox": [ + 214, + 396, + 394, + 418 + ], + "type": "image", + "image_path": "568239dfe0d053038302fc7fbde2c6756bc3312c7e80cec28090883679638505.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 71, + 421, + 228, + 542 + ], + "blocks": [ + { + "bbox": [ + 71, + 421, + 228, + 542 + ], + "lines": [ + { + "bbox": [ + 71, + 421, + 228, + 542 + ], + "spans": [ + { + "bbox": [ + 71, + 421, + 228, + 542 + ], + "type": "image", + "image_path": 
"860ad658417f481695762b3985aa86cc6c8648a27a1974383b58f81738a321ca.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 235, + 421, + 383, + 542 + ], + "blocks": [ + { + "bbox": [ + 235, + 421, + 383, + 542 + ], + "lines": [ + { + "bbox": [ + 235, + 421, + 383, + 542 + ], + "spans": [ + { + "bbox": [ + 235, + 421, + 383, + 542 + ], + "type": "image", + "image_path": "b0c17f52c3d8d9f710084b2e71cd83327cb690eb5a0d9b91600ddb7e86032d53.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 391, + 421, + 538, + 542 + ], + "blocks": [ + { + "bbox": [ + 391, + 421, + 538, + 542 + ], + "lines": [ + { + "bbox": [ + 391, + 421, + 538, + 542 + ], + "spans": [ + { + "bbox": [ + 391, + 421, + 538, + 542 + ], + "type": "image", + "image_path": "81519a54b17fc6c666684e274696d0ac2127e9fe4ff88d0d9326a87c6149a208.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 212, + 552, + 395, + 572 + ], + "blocks": [ + { + "bbox": [ + 212, + 552, + 395, + 572 + ], + "lines": [ + { + "bbox": [ + 212, + 552, + 395, + 572 + ], + "spans": [ + { + "bbox": [ + 212, + 552, + 395, + 572 + ], + "type": "image", + "image_path": "205af94455736f91afbef56faed86d6536d4884d2490f957f228fe6f9d772c60.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 584, + 542, + 620 + ], + "lines": [ + { + "bbox": [ + 67, + 584, + 542, + 620 + ], + "spans": [ + { + "bbox": [ + 67, + 584, + 542, + 620 + ], + "type": "text", + "content": "Figure 24: Best@K performance on MATH500 with ORM (Top) and Majority Vote (Bottom) for early, middle, and late SFT checkpoints and WiSE-FT counterparts, showing Qwen-2.5-0.5B's scaling across K values." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 68, + 638, + 492, + 654 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 638, + 492, + 654 + ], + "spans": [ + { + "bbox": [ + 68, + 638, + 492, + 654 + ], + "type": "text", + "content": "H Diversity Collapse and WiSE-FT Results for the Coding Task" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 666, + 542, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 666, + 542, + 728 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 542, + 728 + ], + "type": "text", + "content": "To test whether coding tasks exhibit the same diversity collapse observed in reasoning benchmarks, we fine-tuned the Qwen2.5-coder-0.5B model for 10 epochs on the Magicoder-Evol-Instruct-110K dataset, following the Stage 2 SFT recipe from OpenCoder LLM. We then applied WiSE-FT by interpolating the weights of the second SFT checkpoint with the initial model using interpolation ratio 0.5. Both the original SFT checkpoints and their WiSE-FT counterparts were evaluated on HumanEval for pass@k." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 69, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 746, + 310, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 746, + 310, + 756 + ], + "spans": [ + { + "bbox": [ + 299, + 746, + 310, + 756 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 75, + 185, + 158 + ], + "blocks": [ + { + "bbox": [ + 70, + 75, + 185, + 158 + ], + "lines": [ + { + "bbox": [ + 70, + 75, + 185, + 158 + ], + "spans": [ + { + "bbox": [ + 70, + 75, + 185, + 158 + ], + "type": "image", + "image_path": "60a8b9a9af69b7127d68729341fe2361371efa2b3e4bacb8eec3f42fc9ec84a0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 188, + 75, + 304, + 158 + ], + "blocks": [ + { + "bbox": [ + 188, + 75, + 304, + 158 + ], + "lines": [ + { + "bbox": [ + 188, + 75, + 304, + 158 + ], + "spans": [ + { + "bbox": [ + 188, + 75, + 304, + 158 + ], + "type": "image", + "image_path": "9dc1172db134cef8ae1e854cc6f68ebfe8e6c96aaeacb27c5b0c870ad9752a67.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 307, + 76, + 421, + 158 + ], + "blocks": [ + { + "bbox": [ + 307, + 76, + 421, + 158 + ], + "lines": [ + { + "bbox": [ + 307, + 76, + 421, + 158 + ], + "spans": [ + { + "bbox": [ + 307, + 76, + 421, + 158 + ], + "type": "image", + "image_path": "cafeaf2d7bb4404026a1ba3699040624b79be50df113c5458aaa97d41afc6c76.jpg" + } + ] + } + ], 
+ "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 425, + 76, + 539, + 158 + ], + "blocks": [ + { + "bbox": [ + 425, + 76, + 539, + 158 + ], + "lines": [ + { + "bbox": [ + 425, + 76, + 539, + 158 + ], + "spans": [ + { + "bbox": [ + 425, + 76, + 539, + 158 + ], + "type": "image", + "image_path": "1e7d7a28f9f5517750d4bcda26579c66a321c8f7a9d9072a3c097f819faa0084.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 231, + 162, + 376, + 178 + ], + "blocks": [ + { + "bbox": [ + 231, + 162, + 376, + 178 + ], + "lines": [ + { + "bbox": [ + 231, + 162, + 376, + 178 + ], + "spans": [ + { + "bbox": [ + 231, + 162, + 376, + 178 + ], + "type": "image", + "image_path": "d837c799a3424c44d929865ee8c5c17f2da827b7e87b44e7380e31803ce3e9b3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 188, + 541, + 213 + ], + "lines": [ + { + "bbox": [ + 67, + 188, + 541, + 213 + ], + "spans": [ + { + "bbox": [ + 67, + 188, + 541, + 213 + ], + "type": "text", + "content": "Figure 25: Best@K performance on MATH500 with majority voting, comparing different SFT and WiSE-FT checkpoints of Qwen-2.5-0.5B for " + }, + { + "bbox": [ + 67, + 188, + 541, + 213 + ], + "type": "inline_equation", + "content": "K = 2, 4, 8, 32" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 233, + 541, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 233, + 541, + 271 + ], + "spans": [ + { + "bbox": [ + 67, + 233, + 541, + 271 + ], + "type": "text", + "content": "We found that, much like in mathematical reasoning tasks, SFT on coding data indeed suffers from diversity collapse: although pass@1 steadily improves over epochs, pass@k begins to deteriorate. 
And WiSE-FT still improves performance and mitigates the diversity collapse." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 72, + 298, + 223, + 411 + ], + "blocks": [ + { + "bbox": [ + 211, + 281, + 402, + 293 + ], + "lines": [ + { + "bbox": [ + 211, + 281, + 402, + 293 + ], + "spans": [ + { + "bbox": [ + 211, + 281, + 402, + 293 + ], + "type": "text", + "content": "HumanEval - Pass@k Across SFT Checkpoints" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 72, + 298, + 223, + 411 + ], + "lines": [ + { + "bbox": [ + 72, + 298, + 223, + 411 + ], + "spans": [ + { + "bbox": [ + 72, + 298, + 223, + 411 + ], + "type": "image", + "image_path": "cb3ca09225ec7f2708663a63adf1bb7d1807c672a937ff5625e5894c7f467191.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 228, + 299, + 380, + 411 + ], + "blocks": [ + { + "bbox": [ + 228, + 299, + 380, + 411 + ], + "lines": [ + { + "bbox": [ + 228, + 299, + 380, + 411 + ], + "spans": [ + { + "bbox": [ + 228, + 299, + 380, + 411 + ], + "type": "image", + "image_path": "293bfc0fc27150b8eec52e2f179c7a8bee6d05de89eb6f954bb89407cf535e84.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 422, + 504, + 435 + ], + "lines": [ + { + "bbox": [ + 105, + 422, + 504, + 435 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 504, + 435 + ], + "type": "text", + "content": "Figure 26: Pass@K performance of SFT checkpoints on HumanEval (temperature = 1.0)." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 387, + 300, + 538, + 411 + ], + "blocks": [ + { + "bbox": [ + 387, + 300, + 538, + 411 + ], + "lines": [ + { + "bbox": [ + 387, + 300, + 538, + 411 + ], + "spans": [ + { + "bbox": [ + 387, + 300, + 538, + 411 + ], + "type": "image", + "image_path": "dc80c75a67d472177dcc63f0074f37ec267e332b7e7317accb544868294cd22e.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 72, + 470, + 223, + 582 + ], + "blocks": [ + { + "bbox": [ + 183, + 453, + 430, + 464 + ], + "lines": [ + { + "bbox": [ + 183, + 453, + 430, + 464 + ], + "spans": [ + { + "bbox": [ + 183, + 453, + 430, + 464 + ], + "type": "text", + "content": "HumanEval - Pass@k Across Checkpoints (SFT vs WiSE-FT)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 72, + 470, + 223, + 582 + ], + "lines": [ + { + "bbox": [ + 72, + 470, + 223, + 582 + ], + "spans": [ + { + "bbox": [ + 72, + 470, + 223, + 582 + ], + "type": "image", + "image_path": "aa8a4b4c00fe197e0575e11b2fecb88604a4562cbe6fb459230d83e7172326d4.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 229, + 470, + 380, + 582 + ], + "blocks": [ + { + "bbox": [ + 229, + 470, + 380, + 582 + ], + "lines": [ + { + "bbox": [ + 229, + 470, + 380, + 582 + ], + "spans": [ + { + "bbox": [ + 229, + 470, + 380, + 582 + ], + "type": "image", + "image_path": "6f01ccaf5c53f26ffda8ffe2a095592c21beb991d051e6b3ecc692744b009663.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 71, + 592, + 537, + 605 + ], + "lines": [ + { + "bbox": [ + 71, + 592, + 537, + 605 + ], + "spans": [ + { + "bbox": [ + 71, + 592, + 537, + 605 + ], + "type": "text", + "content": "Figure 27: Comparison of 
pass@K for SFT checkpoints and their WiSE-FT counterparts at " + }, + { + "bbox": [ + 71, + 592, + 537, + 605 + ], + "type": "inline_equation", + "content": "k = 1" + }, + { + "bbox": [ + 71, + 592, + 537, + 605 + ], + "type": "text", + "content": ", 16, 64." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 387, + 471, + 537, + 582 + ], + "blocks": [ + { + "bbox": [ + 387, + 471, + 537, + 582 + ], + "lines": [ + { + "bbox": [ + 387, + 471, + 537, + 582 + ], + "spans": [ + { + "bbox": [ + 387, + 471, + 537, + 582 + ], + "type": "image", + "image_path": "adce33339af950e6097d8809ac7898c79d536d03de13d3b42c8e47246adb4ef3.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "spans": [ + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 277, + 531, + 521 + ], + "blocks": [ + { + "bbox": [ + 99, + 255, + 536, + 271 + ], + "lines": [ + { + "bbox": [ + 99, + 255, + 536, + 271 + ], + "spans": [ + { + "bbox": [ + 99, + 255, + 536, + 271 + ], + "type": "text", + "content": "HumanEval - Last Checkpoint (1700) Comparison: SFT vs WiSE-FT" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 73, + 277, + 531, + 521 + ], + "lines": [ + { + 
"bbox": [ + 73, + 277, + 531, + 521 + ], + "spans": [ + { + "bbox": [ + 73, + 277, + 531, + 521 + ], + "type": "image", + "image_path": "b5e95e0338cedd9b1f43533a5dbb16642aa6e174015a5f1dd91e1e8a66e77aa3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 533, + 504, + 548 + ], + "lines": [ + { + "bbox": [ + 105, + 533, + 504, + 548 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 504, + 548 + ], + "type": "text", + "content": "Figure 28: Pass@K performance of the final SFT checkpoint versus its WiSE-FT variant." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "spans": [ + { + "bbox": [ + 70, + 26, + 264, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "spans": [ + { + "bbox": [ + 299, + 746, + 311, + 756 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10479/71273ce6-5170-4939-8354-af535b974810_content_list.json b/data/2025/2504_10xxx/2504.10479/71273ce6-5170-4939-8354-af535b974810_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f72cd8154790b50b76446581694c20ce2f03b17f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/71273ce6-5170-4939-8354-af535b974810_content_list.json @@ -0,0 +1,2469 @@ +[ + { + "type": "text", + "text": "InternVL3: Exploring Advanced Training and Test-Time Recipes for Open-Source Multimodal Models", + 
"text_level": 1, + "bbox": [ + 153, + 122, + 843, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jinguo Zhu $^{1*}$ , Weiyun Wang $^{5,1*†}$ , Zhe Chen $^{4,1*†}$ , Zhaoyang Liu $^{1*†}$ , Shenglong Ye $^{1*}$ , Lixin Gu $^{1*}$ , Hao Tian $^{2*}$ , Yuchen Duan $^{6,1*†}$ , Weijie Su $^{1}$ , Jie Shao $^{4,1†}$ , Zhangwei Gao $^{7,1†}$ , Erfei Cui $^{7,1†}$ , Xuehui Wang $^{7,1†}$ , Yue Cao $^{4,1†}$ , Yangzhou Liu $^{4,1†}$ , Xingguang Wei $^{1†}$ , Hongjie Zhang $^{1}$ , Haomin Wang $^{7,1†}$ , Weiye Xu $^{1†}$ , Hao Li $^{1†}$ , Jiahao Wang $^{1†}$ , Nianchen Deng $^{1}$ , Songze Li $^{1}$ , Yinan He $^{1}$ , Tan Jiang $^{2}$ , Jiapeng Luo $^{2}$ , Yi Wang $^{1}$ , Conghui He $^{1}$ , Botian Shi $^{1}$ , Xingcheng Zhang $^{1}$ , Wenqi Shao $^{1}$ , Junjun He $^{1}$ , Yingtong Xiong $^{1}$ , Wenwen Qu $^{1}$ , Peng Sun $^{1}$ , Penglong Jiao $^{1}$ , Han Lv $^{1}$ , Lijun Wu $^{1}$ , Kaipeng Zhang $^{1}$ , Huipeng Deng $^{1}$ , Jiaye Ge $^{1}$ , Kai Chen $^{1}$ , Limin Wang $^{4,1}$ , Min Dou $^{1}$ , Lewei Lu $^{2}$ , Xizhou Zhu $^{3,1}$ , Tong Lu $^{4}$ , Dahua Lin $^{6,1}$ , Yu Qiao $^{1}$ , Jifeng Dai $^{3,1‡}$ , Wenhai Wang $^{6,1‡}$", + "bbox": [ + 166, + 224, + 834, + 324 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Shanghai AI Laboratory $^{2}$ SenseTime Research $^{3}$ Tsinghua University $^{4}$ Nanjing University $^{5}$ Fudan University $^{6}$ The Chinese University of Hong Kong $^{7}$ Shanghai Jiao Tong University", + "bbox": [ + 192, + 325, + 795, + 357 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Code: https://github.com/OpenGVLab/InternVL", + "bbox": [ + 313, + 369, + 683, + 383 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Model: https://huggingface.co/OpenGVLab/InternVL3-78B", + "bbox": [ + 271, + 383, + 725, + 396 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Data: https://huggingface.co/datasets/OpenGVLab/InternVL-Data", + "bbox": [ + 236, + 398, + 759, + 411 + ], + 
"page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 467, + 537, + 483 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We introduce InternVL3, a significant advancement in the InternVL series featuring a native multimodal pre-training paradigm. Rather than adapting a text-only large language model (LLM) into a multimodal large language model (MLLM) that supports visual inputs, InternVL3 jointly acquires multimodal and linguistic capabilities from both diverse multimodal data and pure-text corpora during a single pre-training stage. This unified training paradigm effectively addresses the complexities and alignment challenges commonly encountered in conventional post-hoc training pipelines for MLLMs. To further improve performance and scalability, InternVL3 incorporates variable visual position encoding (V2PE) to support extended multimodal contexts, employs advanced post-training techniques such as supervised fine-tuning (SFT) and mixed preference optimization (MPO), and adopts test-time scaling strategies alongside an optimized training infrastructure. Extensive empirical evaluations demonstrate that InternVL3 delivers superior performance across a wide range of multi-modal tasks. In particular, InternVL3-78B achieves a score of 72.2 on the MMMU benchmark, setting a new state-of-the-art among open-source MLLMs. Its capabilities remain highly competitive with leading proprietary models, including ChatGPT-4o, Claude 3.5 Sonnet, and Gemini 2.5 Pro, while also maintaining strong pure-language proficiency. 
In pursuit of open-science principles, we will publicly release both the training data and model weights to foster further research and development in next-generation MLLMs.", + "bbox": [ + 192, + 500, + 803, + 750 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 135, + 773, + 276, + 789 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Multimodal large language models (MLLMs) [32, 66, 121, 21, 19, 123, 68, 114, 97, 136, 71, 31, 85, 117, 18, 89, 105, 69] have recently achieved or even surpassed human-level performance in a broad spectrum of tasks, underscoring their potential as a significant stride toward artificial general intelligence (AGI). Yet, the majority of leading MLLMs—both open-source and proprietary—are adapted from text-only large language models through sophisticated multi-stage pipelines [21, 19, 18, 5, 121, 7]. These “post-hoc” approaches are built upon the", + "bbox": [ + 133, + 804, + 862, + 876 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10479v3 [cs.CV] 19 Apr 2025", + "bbox": [ + 22, + 262, + 60, + 708 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* equal contribution; † interns at OpenGVLab, Shanghai AI Laboratory; corresponding authors (daijifeng@tsinghua.edu.cn, wangwenhai@pjlab.org.cn).", + "bbox": [ + 135, + 883, + 625, + 911 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/9263bfa8ce115702226a96743252c258021d6cf36e1e18edfe8bbaaebc0a2882.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
InternVL2.5 78BInternVL3 8BInternVL3 78BQwen2.5-VL 72BOther Open-Source MLLMsClaude-3.5 SonnetChatGPT-4o-latestGemini-2.5 Pro
Model WeightsXXX
Training DataXX-XXX
MMMU Multi-discipline70.1%65.6%72.2% (2.1 ↑)70.2%64.5%66.4%72.9%74.7%
MathVista Math72.3%75.2%79.6% (7.3 ↑)74.8%70.5%65.1%71.6%80.9%
AI2D Diagrams89.1%85.2%89.7% (0.6 ↑)88.7%88.1%81.2%86.3%89.5%
ChartQA Charts88.3%86.6%89.7% (1.4 ↑)89.5%88.3%90.8%--
DocVQA Documents95.1%92.7%95.4% (0.3 ↑)96.4%96.5%95.2%--
InfographicVQA infographics84.1%76.8%85.2% (1.1 ↑)87.3%84.7%74.3%--
HallusionBench Hallucination57.4%49.9%59.1% (1.7 ↑)55.2%58.1%55.5%57.0%64.1%
OCRBench OCR854880906 (52↑)885877-894862
LongVideoBench Video63.6%58.8%65.7%(2.1↑)60.7%61.3%---
", + "bbox": [ + 140, + 90, + 857, + 426 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Figure 1: Multimodal performance of the InternVL series and other advanced MLLMs. The InternVL series has consistently exhibited progressive enhancements in multimodal capabilities. The newly released InternVL3 significantly outperforms existing open-source MLLMs. Moreover, even in comparison with state-of-the-art closed-source commercial models, InternVL3 continues to demonstrate highly competitive performance.", + "bbox": [ + 135, + 434, + 864, + 492 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "original text-based pre-training processes, thereby introducing alignment challenges when integrating additional modalities such as vision. In practice, bridging modality gaps often necessitates incorporating auxiliary data from specialized domains (e.g., optical character recognition scenarios) and intricate parameter-freezing or multi-stage fine-tuning schedules to ensure that core linguistic capacities remain uncompromised [73, 7, 5, 18]. Such resource-intensive strategies highlight the need for more efficient multimodal training paradigms.", + "bbox": [ + 133, + 531, + 861, + 602 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this report, we introduce InternVL3, the latest milestone in the InternVL series [21, 20, 18], which is distinguished by its native multimodal pre-training strategy. Rather than first pre-training a text-only large language model and subsequently retrofitting it via multimodal alignment to support visual processing, InternVL3 learns multimodal capabilities from the pre-training stage by jointly exposed to both text-only corpora and diverse multimodal datasets. 
This unified approach enables the model to simultaneously acquire linguistic and multimodal competencies in a more efficient and integrated manner.", + "bbox": [ + 133, + 607, + 859, + 691 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "InternVL3 further excels through multiple innovations that reinforce both performance and scalability. We employ a variable visual position encoding (V2PE) mechanism [42] to accommodate longer multimodal contexts. Furthermore, advanced post-training strategies—comprising supervised fine-tuning (SFT) and mixed preference optimization (MPO) [124]—together with test-time scaling strategies [125] and an optimized training infrastructure [15], significantly enhance InternVL3's efficiency and performance.", + "bbox": [ + 133, + 696, + 861, + 768 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Comprehensive empirical evaluations demonstrate that InternVL3 surpasses its predecessors (e.g., InternVL2.5 [18]) across a wide range of tasks, including multi-discipline reasoning, document understanding, multi-image / video understanding, real-world comprehension, multimodal hallucination detection, visual grounding, and multilingual capabilities. Notably, by incorporating expanded domain-specific datasets, InternVL3 also exhibits marked improvements in tool usage, GUI agents, industrial image analysis, and spatial reasoning, thus substantially extending the multimodal scenarios addressed by the InternVL series. It proves highly competitive with other open-source MLLMs such as Qwen2.5-VL [7] and remains on par with closed-source models (e.g., ChatGPT-4o [98], Claude-3.5 Sonnet [3], Gemini-2.5 Pro [117]). This versatility is evidenced by its 72.2-point performance on the MMMU benchmark [141], setting a new standard among open-source MLLMs. 
Additionally, InternVL3 demonstrates language capabilities comparable to other advanced LLMs of similar scale.", + "bbox": [ + 133, + 772, + 864, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/2543074654573cacd7d214bda38adddfa3eca6683b87e7e4c38e54f1a78f3548.jpg", + "image_caption": [ + "Figure 2: Performance of various MLLMs on the OpenCompass multimodal academic leaderboard. The enhanced InternVL series—InternVL3—demonstrates outstanding multimodal capabilities, significantly outperforming both the Qwen2.5-VL series and closed-source models such as Step-1o, GLM-4v-Plus, and GPT-4o. Remarkably, InternVL3-78B also remains highly competitive with the state-of-the-art Gemini-2.5-Pro." + ], + "image_footnote": [], + "bbox": [ + 140, + 89, + 851, + 414 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To foster further advancements within the open-source community, we will release the training data1 and model weights alongside this work, thereby ensuring transparency and reproducibility for the continued development of next-generation MLLMs.", + "bbox": [ + 133, + 503, + 859, + 547 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 InternVL3", + "text_level": 1, + "bbox": [ + 135, + 565, + 263, + 583 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Building upon the prior InternVL series [21, 19, 18], we propose InternVL3, a new generation within the InternVL model family. InternVL3 is specifically designed to streamline the training pipeline while significantly enhancing multimodal capabilities. 
In this section, we first delineate the core components of InternVL3, including its model architecture, training procedures, test-time scaling strategies, and infrastructure-level optimizations.", + "bbox": [ + 133, + 595, + 859, + 654 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Model Architecture", + "text_level": 1, + "bbox": [ + 135, + 667, + 316, + 684 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The architecture of InternVL3 follows the same general framework as its predecessors, adhering to the \"ViTMLP-LLM\" paradigm [66, 18, 41, 20]. Detailed architectural specifications are summarized in Table 1.", + "bbox": [ + 133, + 694, + 859, + 722 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Although the native pre-training paradigm discussed later could enable training MLLMs from scratch, we choose to initialize the ViT and LLM components with pre-trained model weights to reduce computational costs. The vision encoder is available in two configurations: InternViT-300M and InternViT-6B. For the language model, we leverage pre-trained large language models (LLMs), specifically the Qwen2.5 series and InternLM3-8B. Importantly, our LLM components are initialized solely from pre-trained base models, without employing instruction-tuned variants. The multilayer perceptron (MLP) utilized in the model is a two-layer network with random initialization. In line with the approach taken in InternVL2.5, InternVL3 incorporates a pixel unshuffle operation to enhance scalability for processing high-resolution images. This operation reduces the visual token count to one-quarter of its original value, representing each $448 \\times 448$ image tile with 256 visual tokens.", + "bbox": [ + 133, + 728, + 859, + 854 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Variable Visual Position Encoding. 
InternVL3 also integrates the Variable Visual Position Encoding (V2PE) [42], which utilizes smaller, more flexible position increments for visual tokens. This modifica", + "bbox": [ + 133, + 859, + 859, + 888 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "1The open-source data are being organized, and a comprehensive list will be included in a future revision of this report.", + "bbox": [ + 156, + 897, + 857, + 912 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/6bc83e896e053550faf383149197753b306b3c76e1820e010125d7ed48a15de9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model Name#ParamVision EncoderLanguage ModelOpenCompass Academic
InternVL3-1B0.9BInternViT-300M-448px-V2.5Qwen2.5-0.5B57.4
InternVL3-2B1.9BInternViT-300M-448px-V2.5Qwen2.5-1.5B63.9
InternVL3-8B8.1BInternViT-300M-448px-V2.5Qwen2.5-7B73.3
InternVL3-9B9.2BInternViT-300M-448px-V2.5InternLM3-8B72.4
InternVL3-14B15.1BInternViT-300M-448px-V2.5Qwen2.5-14B75.5
InternVL3-38B38.4BInternViT-6B-448px-V2.5Qwen2.5-32B77.3
InternVL3-78B78.4BInternViT-6B-448px-V2.5Qwen2.5-72B79.5
", + "bbox": [ + 187, + 88, + 803, + 199 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1: Pre-trained models used in the InternVL3 series. The OpenCompass scores for the InternVL3 series were obtained through our local testing.", + "bbox": [ + 135, + 208, + 861, + 238 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "tion facilitates the handling of longer multimodal contexts without excessively extending the position window. Specifically, each training sample for the MLLM is represented as:", + "bbox": [ + 133, + 272, + 861, + 303 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} = \\left(x _ {1}, x _ {2}, \\dots , x _ {L}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 416, + 308, + 859, + 325 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where each token $x_{i}$ can be a textual token embedding, a visual embedding, or another modality-specific representation (e.g., video patch embeddings). The position index $p_{i}$ for any token $x_{i}$ can be computed sequentially as follows:", + "bbox": [ + 133, + 330, + 861, + 373 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\np _ {i} = \\left\\{ \\begin{array}{l l} 0, & \\text {i f} i = 1, \\\\ f _ {\\text {p o s}} \\left(p _ {i - 1}, x _ {i}\\right), & \\text {f o r} i = 2, 3, \\dots , N. \\end{array} \\right. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 375, + 859, + 407 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In contrast to traditional MLLMs, where position indices increment uniformly by 1 for each token, irrespective of modality, V2PE employs a modality-specific recursive function for position index computation. 
This results in distinct position index assignments for textual and visual tokens:", + "bbox": [ + 133, + 411, + 861, + 454 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\np _ {i} = p _ {i - 1} + \\left\\{ \\begin{array}{l l} 1, & \\text {i f} x _ {i} \\text {i s a t e x t u a l t o k e n ,} \\\\ \\delta , & \\text {i f} x _ {i} \\text {i s a v i s u a l t o k e n ,} \\end{array} \\right. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 462, + 859, + 496 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\delta$ is a smaller increment ( $\\delta < 1$ ), reducing the rate at which position indices increase for visual tokens. The standard increment of 1 is retained for textual tokens to preserve their positional distinctions. In line with the original V2PE design, we maintain that $\\delta$ remains constant within a single image to preserve the relative positional relationships. During training, $\\delta$ is randomly chosen for each image from a predefined set of fractional values:", + "bbox": [ + 133, + 503, + 861, + 571 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\delta \\in \\Delta = \\left\\{1, \\frac {1}{2}, \\frac {1}{4}, \\frac {1}{8}, \\frac {1}{1 6}, \\frac {1}{3 2}, \\frac {1}{6 4}, \\frac {1}{1 2 8}, \\frac {1}{2 5 6} \\right\\}. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 570, + 859, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "During inference, $\\delta$ can be flexibly selected based on the input sequence length, enabling a balance between task performance and ensuring that position indices remain within the model's valid context range. 
Notably, when $\\delta = 1$ , V2PE reverts to the conventional positional encoding used in InternVL2.5.", + "bbox": [ + 133, + 601, + 861, + 643 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 Native Multimodal Pre-Training", + "text_level": 1, + "bbox": [ + 135, + 659, + 405, + 674 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We propose a native multimodal pre-training approach that consolidates language pre-training and multi-modal alignment training into a single pre-training stage. Unlike conventional paradigms—where a language-only large model is first trained (typically with language pre-training followed by language post-training) and subsequently adapted to accommodate additional modalities—our method performs integrated optimization by interleaving multimodal data (e.g., image-text, video-text, or interleaved image-text sequences) with large-scale textual corpora during the pre-training process. This unified training scheme enables the pre-trained model to learn both linguistic and multimodal capabilities simultaneously, ultimately enhancing its capability to handle vision-language tasks without introducing additional bridging modules or subsequent inter-model alignment procedures.", + "bbox": [ + 133, + 684, + 861, + 811 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Multimodal Autoregressive Formulation. Let $\\mathcal{M}$ denote a Transformer-based model parameterized by $\\theta$ that can process text, image, and video simultaneously. 
Specifically, for an arbitrary training sample $\\mathbf{x} = (x_{1}, x_{2}, \\ldots, x_{L})$ with the token length of $L$ , we adopt the standard left-to-right autoregressive objective:", + "bbox": [ + 135, + 821, + 861, + 866 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {f u l l}} (\\theta) = - \\sum_ {i = 2} ^ {L} w _ {i} \\cdot \\log p _ {\\theta} \\left(x _ {i} \\mid x _ {1}, \\dots , x _ {i - 1}\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 875, + 859, + 914 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $w_{i}$ denotes the loss weight of token $i$ . Although this formulation naturally propagates gradients through tokens of all modalities, we restrict the loss computation exclusively to text tokens, resulting in:", + "bbox": [ + 133, + 90, + 861, + 122 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {t e x t - o n l y}} (\\theta) = - \\sum_ {\\substack {i = 2 \\\\ x _ {i} \\in \\text {T e x t}}} ^ {L} w _ {i} \\cdot \\log p _ {\\theta} \\left(x _ {i} \\mid x _ {1}, \\dots , x _ {i - 1}\\right). \\tag{6}\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 127, + 861, + 178 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Under this selective objective, visual tokens serve as conditioning context for text prediction and are not directly predicted. Consequently, the model learns to embed multimodal information in a manner that is beneficial for downstream language decoding tasks. Notably, regarding the design choice of the token weight $w_{i}$ , as discussed in InternVL2.5 [18], the widely used token averaging and sample averaging strategies can lead to gradients biased toward longer and shorter responses, respectively. 
To mitigate this issue, we adopt square averaging, which is defined as:", + "bbox": [ + 133, + 184, + 864, + 268 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nw _ {i} = \\left\\{ \\begin{array}{l l} \\frac {1}{l ^ {0}}, & \\text {f o r t o k e n a v e r a g i n g} \\\\ \\frac {1}{l ^ {0 . 5}}, & \\text {f o r s q u a r e a v e r a g i n g} \\\\ \\frac {1}{l ^ {1}}, & \\text {f o r s a m p l e a v e r a g i n g}, \\end{array} \\right. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 284, + 861, + 338 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $l$ denotes the number of tokens in the training sample on which the loss needs to be calculated.", + "bbox": [ + 135, + 347, + 803, + 363 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Joint Parameter Optimization. Unlike the conventional \"language-only training followed by multimodal adaptation\" paradigm, our method updates all model parameters jointly during multimodal pre-training. Specifically, let", + "bbox": [ + 133, + 373, + 861, + 412 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\theta^ {*} = \\underset {\\theta} {\\arg \\min } \\mathbb {E} _ {\\mathbf {x} \\in \\mathcal {D} _ {\\text {m u l t i}}} \\left[ \\mathcal {L} _ {\\text {t e x t - o n l y}} (\\theta) \\right], \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 366, + 414, + 859, + 438 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{D}_{\\mathrm{multi}}$ is the union of large-scale text-only and multimodal corpora (e.g., image-text or video-text pairs). We thus optimize a single model to handle these combined data sources. 
This multi-task joint optimization ensures that text representations and visual features are learned in concert, reinforcing alignment across modalities.", + "bbox": [ + 133, + 441, + 861, + 497 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Moreover, this integrated optimization departs from conventional \"language-only training followed by multimodal adaptation\" pipelines, which often freeze or partially fine-tune certain layers in the LLM component or even in the ViT encoder when adapting to MLLM. In contrast, our method trains every layer jointly, allowing all parameters to be jointly optimized on large-scale multimodal corpora and ensuring that both linguistic and visual features evolve synchronously. As a result, the final parameters are primed for high performance on both pure language and multimodal tasks, without additional tuning steps.", + "bbox": [ + 133, + 503, + 861, + 589 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Data. The pre-training data utilized in InternVL3 is broadly classified into two categories: multimodal data and pure language data. The multimodal dataset comprises a synthesis of pre-existing datasets alongside newly acquired real-world data. Specifically, we leverage the pre-training corpus from InternVL2.5, which covers a diverse range of domains such as image captioning, general question answering, mathematics, charts, optical character recognition (OCR), knowledge grounding, document understanding, multi-turn dialogue, and medical data. Although the overall data scale was not increased, the utility of this dataset was significantly improved by updating not only to the MLP module weights but also to those associated with the ViT and LLM components. 
In addition, to enhance the model's ability to generalize in real-world applications, additional data is incorporated from tasks related to graphical user interfaces (GUI), tool usage, 3D scene understanding, and video comprehension.", + "bbox": [ + 133, + 593, + 861, + 734 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To compensate for the relatively short and less diverse textual content typically found in multimodal datasets, we integrate pure language data into the pre-training process. This helps preserve and amplify the model's capabilities in language understanding and generation. The language corpus is primarily constructed on the pre-training data from InternLM2.5 and is further augmented with various open-source text datasets [8, 77, 79]. This enhancement aims to improve the model's performance on knowledge-intensive tasks, as well as its proficiency in mathematical and reasoning tasks.", + "bbox": [ + 133, + 738, + 861, + 824 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given the complexity of balancing these heterogeneous data sources, determining an appropriate sampling strategy is non-trivial. In InternVL3, we adopt a two-stage strategy to establish the optimal sampling ratio between multimodal and language data. Initially, we train separate models on the multimodal and language datasets and evaluate their performance on corresponding benchmarks, allowing us to identify optimal sampling ratios within each modality. Then, under a fixed total training budget, we combine the two modalities and determine their relative sampling ratio. Empirical studies show that a 1:3 ratio of language to multimodal data", + "bbox": [ + 133, + 827, + 861, + 912 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "yields the best overall performance across both unimodal and multimodal benchmarks. 
Under this configuration, the total number of training tokens is approximately 200 billion, comprising 50 billion from language data and 150 billion from multimodal data.", + "bbox": [ + 133, + 90, + 864, + 133 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2.3 Post-Training", + "text_level": 1, + "bbox": [ + 135, + 150, + 274, + 166 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "After the Native Multimodal Pre-Training, we apply a two-stage post-training strategy to further enhance the multimodal conversation and reasoning abilities of our models. This strategy consists of Supervised Fine-Tuning (SFT) and Mixed Preference Optimization (MPO). In the SFT phase, the model is trained to imitate the high-quality responses under positive supervision signals. In the subsequent MPO phase, we introduce additional supervision from both positive and negative samples, thereby further improving its overall abilities.", + "bbox": [ + 133, + 176, + 864, + 247 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Supervised Fine-Tuning. In this phase, the techniques of random JPEG compression, square loss re-weighting, and multimodal data packing proposed in InternVL2.5 [18] are also employed in the InternVL3 series. The main advancement of the SFT phase in InternVL3 compared to InternVL2.5 lies in the use of higher-quality and more diverse training data. Specifically, we further extend training samples for tool usage, 3D scene understanding, GUI operations, long context tasks, video understanding, scientific diagrams, creative writing, and multimodal reasoning.", + "bbox": [ + 133, + 251, + 864, + 335 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Mixed Preference Optimization. During Pre-training and SFT, the model is trained to predict the next token conditioned on previous ground-truth tokens. However, during inference, the model predicts each token based on its own prior outputs. 
This discrepancy between ground-truth tokens and model-predicted tokens introduces a distribution shift, which can impair the model's Chain-of-Thought (CoT) reasoning capabilities. To mitigate this issue, we employ Mixed Preference Optimization (MPO) [124], which introduces additional supervision from both positive and negative samples to align the model response distribution with the ground-truth distribution, thereby improving reasoning performance. Specifically, the training objective of MPO is a combination of preference loss $\\mathcal{L}_p$ , quality loss $\\mathcal{L}_q$ , and generation loss $\\mathcal{L}_g$ , which can be formulated as follows:", + "bbox": [ + 133, + 340, + 864, + 455 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = w _ {p} \\mathcal {L} _ {p} + w _ {q} \\mathcal {L} _ {q} + w _ {g} \\mathcal {L} _ {g}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 398, + 460, + 862, + 478 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $w_{*}$ represents the weight assigned to each loss component. Specifically, the DPO loss [101] serves as the preference loss to enable the model to learn the relative preference between chosen and rejected responses:", + "bbox": [ + 133, + 484, + 862, + 513 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {p} = - \\log \\sigma \\left(\\beta \\log \\frac {\\pi_ {\\theta} \\left(y _ {c} \\mid x\\right)}{\\pi_ {0} \\left(y _ {c} \\mid x\\right)} - \\beta \\log \\frac {\\pi_ {\\theta} \\left(y _ {r} \\mid x\\right)}{\\pi_ {0} \\left(y _ {r} \\mid x\\right)}\\right), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 325, + 518, + 862, + 550 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\beta$ is the KL penalty coefficient, and $x$ , $y_{c}$ , and $y_{r}$ are user query, chosen response, and rejected response, respectively. The policy model $\\pi_{\\theta}$ is initialized from model $\\pi_0$ . 
After that, the BCO loss [53] is employed as the quality loss, which helps the model to understand the absolute quality of individual responses:", + "bbox": [ + 133, + 555, + 862, + 599 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {q} = \\mathcal {L} _ {q} ^ {+} + \\mathcal {L} _ {q} ^ {-}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 439, + 604, + 862, + 625 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mathcal{L}_q^+$ and $\\mathcal{L}_q^-$ represent the loss for chosen and rejected responses, respectively. They are calculated independently, requiring the model to differentiate the absolute quality of individual responses. The loss terms are given by:", + "bbox": [ + 133, + 631, + 862, + 672 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {q} ^ {+} = - \\log \\sigma \\left(\\beta \\log \\frac {\\pi_ {\\theta} \\left(y _ {c} \\mid x\\right)}{\\pi_ {0} \\left(y _ {c} \\mid x\\right)} - \\delta\\right), \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 672, + 862, + 704 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {q} ^ {-} = - \\log \\sigma \\left(- \\left(\\beta \\log \\frac {\\pi_ {\\theta} \\left(y _ {r} \\mid x\\right)}{\\pi_ {0} \\left(y _ {r} \\mid x\\right)} - \\delta\\right)\\right), \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 705, + 862, + 736 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\delta$ represents the reward shift, calculated as the moving average of previous rewards to stabilize training. Finally, the LM loss is used as the generation loss to help the model learn the generation process of preferred responses. The loss function is defined in Equation 6.", + "bbox": [ + 133, + 738, + 862, + 781 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Data. 
For SFT data, we construct the training corpora based on those used in InternVL2.5 [18] while introducing additional tool usage, 3D scene understanding, GUI operations, scientific diagrams, creative writing, and multimodal reasoning samples. As a result, the number of training samples grows from 16.3M in InternVL2.5 to 21.7M in InternVL3. For MPO data, we construct preference pairs based on the data pipeline and samples proposed in MMPR v1.2 [124], which cover a wide range of domains, including general visual question answering (VQA) [43, 50, 90, 83, 127, 126], science [57, 16, 82], chart [91, 54, 11], mathematics [72, 104, 10, 81, 55, 40, 147, 106], OCR [92, 107, 9, 49, 96], and document [24]. We use the SFT versions of InternVL3-8B, 38B, and 78B to generate rollouts. During the MPO phase, all models are trained on the same dataset, which comprises about 300K samples.", + "bbox": [ + 133, + 786, + 864, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2.4 Test-Time Scaling", + "text_level": 1, + "bbox": [ + 135, + 90, + 303, + 107 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Test-Time Scaling has been shown to be an effective method to enhance the reasoning abilities of LLMs and MLLMs [108, 94, 87, 70, 120, 36, 152, 125]. In this work, we use the Best-of-N evaluation strategy and employ VisualPRM-8B [125] as the critic model to select the best response for reasoning and mathematics evaluation.", + "bbox": [ + 133, + 116, + 861, + 160 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Visual Process Reward Model. VisualPRM first assigns a quality score to each step of the given solution and then averages these scores to obtain the overall score for this solution. This process is formulated as a multi-turn chat task so that we can effectively leverage the generation ability of MLLMs. 
The image $I$ , question $q$ , and the first step $s_0$ of the step-by-step solution $s = \\{s_0, s_1, \\dots, s_n\\} \\in S$ to this question are included in the first turn and a new step is presented in each subsequent turn. During the training stage, the model is required to predict the correctness of the given step in each turn as follows:", + "bbox": [ + 133, + 165, + 861, + 250 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nc _ {i} \\sim M \\left(y _ {i} \\mid I, q, s _ {\\leq i}\\right), \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 419, + 256, + 859, + 273 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $c_{i} \\in \\{+, -\\}$ denotes the correctness of $i$ -th step. During the inference stage, the score for each step is defined as the probability of generating \"+\"", + "bbox": [ + 133, + 280, + 859, + 310 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Data. VisualPRM400K [125] is used to train VisualPRM, which is constructed based on multimodal questions collected from MMPR v1.2 [124]. Following the data pipeline in VisualPRM400K, we further expand VisualPRM400K by sampling rollouts from the 8B and 38B variants of InternVL3.", + "bbox": [ + 133, + 315, + 861, + 358 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "2.5 Infrastructure", + "text_level": 1, + "bbox": [ + 135, + 375, + 279, + 388 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To facilitate model training, we extend the InternEVO framework [15]—originally designed to optimize the Zero Redundancy Optimizer (ZeRO) for large-scale LLM training—to support the training of our InternVL models. This extension enables efficient scaling to hundreds of billions of parameters across thousands of GPUs. The enhanced framework introduces flexible and decoupled sharding strategies for the ViT, MLP, and LLM components, significantly improving training efficiency by overlapping communication and computation. 
It further supports a comprehensive range of parallelism strategies—including data, tensor, sequence, and pipeline parallelism—as well as their arbitrary combinations.", + "bbox": [ + 133, + 400, + 861, + 500 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "A key challenge in MLLM training is the imbalance in computational load caused by the varying proportions of visual and textual tokens. Such imbalances can lead to inefficiencies by overburdening either the ViT or LLM modules. To address this, we introduce a suite of techniques that dynamically balance computational workloads across modules, ensuring efficient and equitable resource utilization.", + "bbox": [ + 133, + 503, + 861, + 559 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For InternVL models of varying scales, the extended InternEVO framework formulates an optimization objective that identifies the optimal configuration to minimize both memory consumption and communication overhead across different module dimensions. To support sequences of up to 32K tokens, our approach incorporates both head-parallel and sequence-parallel techniques, effectively overcoming scalability bottlenecks while preserving computational efficiency. Compared to the training of InternVL2.5, the application of InternEVO in InternVL3 results in a training speedup of $50\\%$ to $200\\%$ for models of comparable size, given the same computational budget.", + "bbox": [ + 133, + 566, + 861, + 662 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3 Experiments", + "text_level": 1, + "bbox": [ + 135, + 684, + 277, + 700 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we first compare the overall multimodal capabilities of InternVL3 with those of current advanced MLLMs using widely adopted multimodal benchmarks. 
Subsequently, we evaluate the performance of InternVL3 in various domains, including multimodal reasoning, mathematics, optical character recognition (OCR), chart and document understanding, multi-image understanding, real-world comprehension, comprehensive multimodal evaluation, multimodal hallucination evaluation, visual grounding, multimodal multilingual understanding, video understanding, and other multimodal tasks, most of which were tested using VLMEvalKit [33]. Additionally, we provide a detailed evaluation of the language capabilities of InternVL3. Finally, we analyze the advantages of several key modifications in InternVL3 compared to its predecessor, InternVL2.5, including the naive multimodal pre-training, the V2PE positional encoding, and the improvements brought by the post-training technique.", + "bbox": [ + 133, + 715, + 861, + 842 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.1 Overall Comparison to Other Advanced MLLMs", + "text_level": 1, + "bbox": [ + 135, + 857, + 522, + 872 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Figure 1 provides a detailed assessment of InternVL3's performance across a diverse set of benchmarks, including MMMU [141], MathVista [80], AI2D [57], ChartQA [91], DocVQA [93], InfographicVQA [92],", + "bbox": [ + 133, + 883, + 861, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "HallusionBench [45], OCRBench [76], and LongVideoBench [129]. Compared with previous models, InternVL3 demonstrates substantial improvements across a wide range of task categories. 
These advancements can be primarily attributed to enhanced training strategies, refined testing methodologies, and the expanded training corpus.", + "bbox": [ + 133, + 90, + 861, + 148 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "More specifically, InternVL3 achieves an impressive score of 72.2 on the MMMU benchmark, underscoring its superior capacity to manage complex multimodal challenges. Beyond its performance on MMMU, InternVL3 consistently outperforms earlier versions of the InternVL series on a variety of tasks, thereby emphasizing its broad applicability to real-world scenarios that require sophisticated multimodal comprehension and reasoning.", + "bbox": [ + 133, + 152, + 861, + 210 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In addition to surpassing its open-source counterparts, InternVL3 exhibits competitive performance relative to leading closed-source commercial models, such as ChatGPT-4o-latest [98] and Claude-3.5 Sonnet [3]. In many cases, the performance gap between InternVL3 and these proprietary models is notably narrowed—and in certain benchmarks, such as AI2D and ChartQA, InternVL3 even surpasses them. 
Nonetheless, our results further reveal that Gemini2.5 Pro [117] maintains a performance edge on select tasks (e.g., on HallusionBench), indicating that despite the notable progress in InternVL3, there remains room for further refinement of our InternVL series.", + "bbox": [ + 133, + 215, + 861, + 311 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.2 Multimodal Reasoning and Mathematics", + "text_level": 1, + "bbox": [ + 135, + 330, + 462, + 345 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To comprehensively evaluate the multimodal reasoning and mathematical capabilities of InternVL3, we conduct experiments on a series of benchmarks, including MMMU [141] for multidisciplinary reasoning, MathVista [80], MathVision [119], MathVerse [146] for mathematical reasoning, as well as DynaMath [155], WeMath [99] and LogicVista [131] for complementary evaluation on logical reasoning.", + "bbox": [ + 133, + 356, + 861, + 412 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As shown in Table 2, InternVL3 exhibits strong performance across all tested benchmarks. Specifically, on the MMMU benchmark, InternVL3-based models consistently outperform smaller-scale competitors. For instance, with increasing model size, InternVL3-78B reaches a score over 72 on MMMU, indicating robust understanding and reasoning capability in handling abstract multidisciplinary concepts. In the mathematical domain, InternVL3 demonstrates significant gains across various benchmarks. On MathVista, InternVL3-78B records a performance close to 79.0, while on MathVision and MathVerse, the results are also competitive, evidencing the model's enhanced ability to tackle challenging mathematical problems. Furthermore, performance on DynaMath, WeMath, and LogicVista consistently improves with scaling. 
The overall score—a mean calculated across all benchmarks—shows that InternVL3 models achieve a balanced enhancement across different aspects, surpassing many of the preceding open-source methods.", + "bbox": [ + 133, + 417, + 861, + 556 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "A notable characteristic of InternVL3 is the efficiency of the best-of-N evaluation strategy [125]. When applying this method, even models with relatively smaller parameter sizes (e.g., InternVL3-1B and InternVL3-2B) exhibit substantial improvements in reasoning performance. Specifically, in the Vision-Only split of MathVerse, the best-of-8 strategy leads to increases of approximately 6.0 and 3.2 percentage points for InternVL3-38B and InternVL3-78B, respectively. This improvement underscores the effectiveness of test-time scaling.", + "bbox": [ + 133, + 561, + 861, + 633 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.3 OCR, Chart, and Document Understanding", + "text_level": 1, + "bbox": [ + 135, + 650, + 482, + 666 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To assess the model's integrated vision-language understanding in tasks involving text, document, and chart comprehension, we perform a comprehensive evaluation over nine benchmarks, including AI2D [57], ChartQA [91], TextVQA [107], DocVQA [93], InfoVQA [92], OCRBench [76], SEED-2-Plus [61], CharXiv [128], and VCR [148]. As illustrated in Table 3, the InternVL3 series not only maintains robust performance across these benchmarks but also demonstrates competitive or superior results when compared to other open-source and closed-source counterparts.", + "bbox": [ + 133, + 676, + 861, + 760 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "At the 1B scale, InternVL3-1B achieves performance that is roughly on par with previous lower-scale models. 
At the 2B scale, InternVL3-2B not only improves its absolute scores—for instance, reaching 78.7/87.4 on AI2D and 88.3 on DocVQA—but also exhibits a performance edge over similarly parameterized models such as Qwen2-VL-2B [121]. Although its TextVQA performance (77.0) remains comparable to that of Qwen2-VL-2B, the enhancements in document and chart understanding suggest that the proposed native multimodal pre-training are particularly effective in tasks requiring precise visual-textual integration.", + "bbox": [ + 133, + 766, + 861, + 851 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The benefits of the new pre-training protocol become even more pronounced at larger scales. Mid-scale models like InternVL3-8B and InternVL3-9B deliver substantial gains, with InternVL3-8B achieving 85.2/92.6 on AI2D, 92.7 on DocVQA, and VCR scores of 94.5/98.1. Moreover, when compared with heavyweight systems such as Qwen2-VL-72B [121] or even closed-source models like GPT-4o-20240513 [97], the high-scale variants", + "bbox": [ + 133, + 854, + 861, + 912 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/7dfb9f75ca6b528ec382461cc9c8557cfaeffdfdd09cb987fa67c8df23797837.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelMMMUMathVistaMathVisionMathVerseDynaMathWeMathLogicVistaOverall
LLaVA-OV-0.5B [60]31.434.8------
InternVL2.5-1B [18]41.247.121.116.45.611.126.024.1
InternVL3-1B43.445.818.818.75.813.429.825.1
w/ VisualPRM-Bo8 [125]55.462.121.728.913.428.534.935.0
Aquila-VL-2B [44]46.959.117.917.45.015.930.627.5
Qwen2.5-VL-3B [7]51.261.221.931.213.222.940.334.6
Ovis-2B [84]45.664.117.729.410.09.934.730.2
Ovis-4B [84]49.069.621.538.518.016.935.335.5
InternVL2.5-2B [18]43.251.114.022.34.48.027.324.3
InternVL2.5-4B [18]51.864.118.427.715.221.234.233.2
InternVL3-2B48.657.021.725.314.622.436.932.4
w/ VisualPRM-Bo8 [125]57.870.526.636.721.438.540.541.7
LLaVA-OV-7B [60]47.958.618.319.39.020.933.329.6
MiniCPM-V2.6 [135]49.860.823.418.99.816.427.529.5
MiniCPM-o2.6 [135]50.973.321.735.010.425.236.036.1
Ovis-8B [84]57.471.825.942.320.427.239.440.6
Qwen2.5-VL-8B [7]55.067.825.441.121.035.244.141.4
InternVL2.5-8B [18]56.264.517.022.89.423.536.032.8
InternVL3-8B62.771.629.339.825.537.144.144.3
w/ VisualPRM-Bo8 [125]66.075.237.546.328.548.149.750.2
InternVL3-9B57.771.527.635.326.733.849.243.1
w/ VisualPRM-Bo8 [125]63.776.233.945.829.146.650.649.4
Ovis2-16B [84]60.773.730.145.826.345.047.447.0
InternVL2.5-26B [18]60.768.223.424.011.430.939.636.9
InternVL3-14B67.175.137.244.431.343.051.249.9
w/ VisualPRM-Bo8 [125]69.377.940.147.733.152.056.253.8
Cambrian-34B [116]49.753.2------
VILA-1.5-40B [71]55.149.5------
Ovis2-34B [84]66.776.131.950.127.551.949.950.6
InternVL2.5-38B [18]63.971.932.236.920.038.347.944.4
InternVL3-38B70.175.134.248.235.348.658.452.8
w/ VisualPRM-Bo8 [125]71.079.441.854.236.155.258.456.6
GPT-4o-20241120 [97]70.760.031.240.634.545.852.847.9
Claude-3.7-Sonnet [3]75.066.841.946.739.749.358.253.9
Gemini-2.0-Flash [30]72.670.443.647.842.147.452.353.7
Gemini-2.0-Pro [29]69.971.348.167.343.356.553.258.5
LLaVA-OV-72B [60]55.767.125.327.215.632.040.937.7
QvQ-72B-Preview [115]70.370.334.948.230.739.058.250.2
Qwen2.5-VL-72B [7]68.274.239.347.335.949.155.752.8
InternVL2.5-78B [18]70.072.332.239.219.239.849.046.0
InternVL3-78B72.279.043.151.035.146.155.954.6
w/ VisualPRM-Bo8 [125]72.280.540.854.237.352.457.956.5
", + "bbox": [ + 166, + 85, + 831, + 580 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 2: Comparison of multimodal reasoning and mathematical performance. MMMU [141] is a multidisciplinary reasoning benchmark. MathVista [80], MathVision [119], MathVerse [146], DynaMath [155], and WeMath [99] are mathematics benchmarks. For MathVerse, we report the performance on Vision-Only split. LogicVista [131] is a logical reasoning benchmark. Part of the results are collected from the OpenCompass leaderboard [26]. The overall score is the average score of the above benchmarks. \"w/ VisualPRM-Bo8\" denotes that the model is evaluated with Best-of-8 settings, where VisualPRM [125] serves as the critic model.", + "bbox": [ + 135, + 587, + 864, + 674 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "of InternVL3—particularly InternVL3-38B and InternVL3-78B—push the envelope further. For instance, InternVL3-78B attains a remarkable OCRBench score of 906 and VCR scores of 96.0/98.6, clearly surpassing the corresponding metrics of comparable models.", + "bbox": [ + 133, + 702, + 864, + 747 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "3.4 Multi-Image Understanding", + "text_level": 1, + "bbox": [ + 135, + 766, + 377, + 782 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "we evaluate the multi-image relation perception and understanding capabilities of InternVL3 across a suite of widely recognized benchmarks, including BLINK [39], Mantis-Eval [51], MMIU [95], MuirBench [118], MMT-Bench [137], and MIRB [153], as presented in Table 4. These benchmarks comprehensively assess skills such as cross-image reasoning and context integration, all of which are crucial for effective multimodal interaction.", + "bbox": [ + 133, + 792, + 861, + 864 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "InternVL3 consistently outperforms its earlier counterparts across different parameter scales. 
For instance, at the 1B scale, InternVL3-1B exhibits a modest yet consistent improvement over preceding models, achieving a BLINK score of 42.9 and an MMT-Bench score of 52.9. The performance gains become even more pronounced", + "bbox": [ + 135, + 869, + 864, + 914 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/c59cf6f8b14dce70a608cfda365f8d999fdb86411a13c8775b6106a10a119b34.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model NameAI2D (w / wo M)ChartQA (test avg)TextVQA (val)DocVQA (test)InfoVQA (test)OCR BenchSEED-2 PlusCharXiv (RQ / DQ)VCR-EN-Easy (EM / Jaccard)Overall
LLaVA-OneVision-0.5B [60]57.1 / -61.4-70.041.8565----
InternVL2-1B [19]64.1 / 70.572.970.581.750.975454.318.1 / 30.721.5 / 48.454.9
InternVL2.5-1B [18]69.3 / 77.875.972.084.856.078559.019.0 / 38.491.5 / 97.068.3
InternVL3-1B69.4 / 78.375.374.181.953.779058.221.0 / 47.189.3 / 96.268.6
Qwen2-VL-2B [121]74.7 / 84.673.579.790.165.580962.4-81.5 / --
Qwen2.5-VL-3B [7]81.6 / -84.079.393.977.179767.631.3 / 58.6--
Aquila-VL-2B [44]75.0 / -76.576.485.058.377263.0-70.0 / --
InternVL2-2B [19]74.1 / 82.376.273.486.958.978460.021.0 / 40.632.9 / 59.262.0
InternVL2.5-2B [18]74.9 / 83.579.274.388.760.980460.921.3 / 49.793.2 / 97.672.1
InternVL3-2B78.7 / 87.480.277.088.366.183564.628.3 / 54.791.2 / 96.974.7
Ovis1.6-Gemma2-9B [84]84.4 / -----830----
MiniCPM-V2.6 [135]82.1 / -82.480.190.8-85265.731.0 / 57.173.9 / 85.7-
Molmo-7B-D [31]- / 93.284.181.792.272.6694----
Qwen2-VL-7B [121]83.0 / 92.183.084.394.576.586669.0-89.7 / 93.8-
Qwen2.5-VL-7B [7]83.9 / -87.384.995.782.686470.442.5/73.9--
InternVL2-8B [19]83.8 / 91.783.377.491.674.879467.531.2 / 56.137.9 / 61.569.7
InternVL2.5-8B [18]84.5 / 92.884.879.193.077.682269.732.9 / 68.692.6 / 97.479.6
InternVL3-8B85.2 / 92.686.680.292.776.888069.737.6 / 73.694.5 / 98.181.3
InternVL3-9B84.6 / 92.986.279.493.679.687768.838.0 / 72.594.2 / 97.981.3
InternVL3-14B86.0 / 93.787.380.594.183.687570.343.1 / 82.294.8 / 98.283.4
InternVL-Chat-V1.5 [19]80.7 / 89.883.880.690.972.572466.329.2 / 58.514.7 / 51.465.9
InternVL2-26B [19]84.5 / 92.584.982.392.975.982567.633.4 / 62.474.5 / 86.776.7
InternVL2.5-26B [18]86.4 / 94.487.282.494.079.885270.835.9 / 73.594.4 / 98.081.8
Qwen2.5-VL-32B [7]---94.883.4-----
Cambrian-34B [116]79.5 / -75.676.775.546.0600-27.3 / 59.779.7 / 89.3-
VILA-1.5-40B [71]69.9 / -67.273.6--460-24.0 / 38.7--
InternVL2-40B [19]86.6 / 94.586.283.093.978.783769.232.3 / 66.084.7 / 92.679.3
InternVL2.5-38B [18]87.6 / 95.188.282.795.383.684271.242.4 / 79.694.7 / 98.283.6
InternVL3-38B88.9 / 95.589.283.995.485.088671.646.4 / 87.296.1 / 98.785.5
GPT-4V [97]78.2 / 89.478.578.088.475.164553.837.1 / 79.952.0 / 65.470.0
GPT-4o-20240513 [97]84.6 / 94.285.777.492.879.273672.047.1 / 84.591.6 / 96.481.6
Claude-3-Opus [3]70.6 / 88.180.867.589.355.669444.230.2 / 71.662.0 / 77.767.3
Claude-3.5-Sonnet [3]81.2 / 94.790.874.195.274.378871.760.2 / 84.363.9 / 74.778.7
Gemini-1.5-Pro [102]79.1 / 94.487.278.893.181.0754-43.3 / 72.062.7 / 77.7-
LLaVA-OneVision-72B [60]85.6 / -83.780.591.374.9741----
NVLM-D-72B [28]85.2 / 94.286.082.192.6-853----
Molmo-72B [31]- / 96.387.383.193.581.9-----
Qwen2-VL-72B [121]88.1 / -88.385.596.584.5877--91.3 / 94.6-
Qwen2.5-VL-72B [7]88.7 / -89.583.596.487.388573.049.7 / 87.4--
InternVL2-Llama3-76B [19]87.6 / 94.888.484.494.182.083969.738.9 / 75.283.2 / 91.381.1
InternVL2.5-78B [18]89.1 / 95.788.383.495.184.185471.342.4 / 82.395.7 / 94.583.9
InternVL3-78B89.7 / 96.089.784.395.486.590671.946.0 / 85.196.0 / 98.685.8
", + "bbox": [ + 138, + 87, + 859, + 619 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 3: Comparison of OCR, chart, and document understanding performance. We evaluate OCR-related capabilities across 9 benchmarks, including AI2D [57], ChartQA [91], TextVQA [107], DocVQA [93], InfoVQA [92], OCRBench [76], SEED-2-Plus [61], CharXiv [128], and VCR [148]. Part of results are collected from [34, 31, 3, 128, 148] and the OpenCompass leaderboard [26].", + "bbox": [ + 133, + 627, + 859, + 684 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "at the 2B scale; InternVL3-2B attains a remarkable 65.9 on Mantis-Eval, representing an improvement of over 11 points relative to InternVL2.5-2B, and also boosts its MMT-Bench performance to 59.5. Such enhancements indicate that the advanced pre-training strategies and enhanced training datasets in InternVL3 significantly elevate its capability to capture and reason over inter-image relationships.", + "bbox": [ + 133, + 724, + 859, + 781 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "At higher scales, the trend continues. InternVL3-8B and its subsequent larger variants not only secure steady improvements on BLINK and MMT-Bench but also demonstrate substantial gains on the MIRB and MuirBench benchmarks. In particular, InternVL3-78B reaches a BLINK score of 66.3 and an MMT-Bench score of 73.2, positioning it as a competitive alternative to leading closed-source models like GPT-4o. These results suggest that the learning multimodal capabilities via native multimodal pre-training and the scaling of model parameters are key contributors to the elevated performance observed across diverse evaluation settings. 
Despite these encouraging outcomes, a noticeable performance gap between our InternVL3 and other MLLMs like Qwen2.5-VL still exists on certain benchmarks, such as MuirBench, implying that future work may benefit from further enhancements in training data curation and additional model refinements.", + "bbox": [ + 133, + 787, + 861, + 912 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/82fc116de0b0bf8c97c7d043c9af4f27e7a29697d82db4c93020c1d15f127367.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model NameBLINK (val)Mantis EvalMMIUMuir BenchMMT (val)MIRB (avg)OverallRealWorld QAMME-RW (EN)WildVision (win rate)R-Bench (dis)Overall
LLaVA-OneVision-0.5B [60]52.139.6-25.5---55.6----
InternVL2-1B [19]38.646.137.329.349.531.538.750.340.217.855.641.0
InternVL2.5-1B [18]42.051.238.529.950.335.641.357.544.243.459.051.0
InternVL3-1B42.950.239.331.252.936.142.158.246.043.860.452.1
Qwen2-VL-2B [121]44.4---55.1--62.6----
Qwen2.5-VL-3B [6]47.6--47.7---65.453.1---
InternVL2-2B [19]43.848.439.832.550.432.141.257.347.331.856.848.3
InternVL2.5-2B [18]44.054.843.540.654.536.445.660.148.844.262.253.8
InternVL3-2B50.365.943.038.859.542.950.164.353.848.867.558.6
Qwen2-VL-7B [121]53.2---64.0--70.156.5-64.0-
Qwen2.5-VL-7B [6]56.4--59.6---68.557.4---
MiniCPM-V2.6 [135]53.069.0--60.8--65.0----
InternVL2-8B [19]50.965.442.048.760.050.052.864.453.554.467.960.1
InternVL2.5-8B [18]54.867.746.751.162.352.555.970.159.162.070.165.3
InternVL3-8B55.570.146.855.065.056.858.270.862.069.874.169.2
InternVL3-9B58.670.150.451.465.458.659.170.561.363.870.366.5
InternVL3-14B60.376.050.956.270.359.362.270.764.069.869.368.5
InternVL-Chat-V1.5 [19]46.666.837.438.558.050.349.666.049.456.667.960.0
InternVL2-26B [19]56.269.642.650.660.653.755.668.358.762.270.164.8
InternVL2.5-26B [18]61.875.649.461.166.955.761.874.561.865.272.968.6
Cambrian-34B [116]-------67.844.1---
InternVL2-40B [19]57.271.447.954.466.255.258.771.861.863.273.367.5
InternVL2.5-38B [18]63.278.355.362.770.061.265.173.564.066.472.169.0
InternVL3-38B64.077.957.463.871.862.366.275.667.371.673.372.0
GPT-4V [97]54.662.7-62.364.353.1-61.4-71.865.6-
GPT-4o-20240513 [97]68.0-55.768.065.4--75.445.280.677.769.7
Claude-3.5-Sonnet [3]--53.4----60.151.6---
Gemini-1.5-Pro [102]--53.4-64.5--67.538.2---
LLaVA-OneVision-72B [60]55.477.6-54.8---71.9----
Qwen2-VL-72B [121]----71.8--77.8----
Qwen2.5-VL-72B [6]64.4--70.7---75.763.2---
InternVL2-Llama3-76B [19]56.873.744.251.267.458.258.672.263.065.874.168.8
InternVL2.5-78B [18]63.877.055.863.570.861.165.378.762.971.477.272.6
InternVL3-78B66.379.360.464.573.264.368.078.065.473.677.473.6
", + "bbox": [ + 138, + 87, + 859, + 523 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table 4: Comparison of multi-image and real-world understanding performance. Multi-image benchmarks include BLINK [39], Mantis-Eval [51], MMIU [95], MuirBench [118], MMT-Bench [137], and MIRB [153]. Real-world benchmarks encompass RealWorldQA [27], MME-RealWorld [151], WildVision [86], and R-Bench [62]. Part of the results are sourced from the benchmark papers and the OpenCompass leaderboard [26].", + "bbox": [ + 135, + 531, + 864, + 590 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "3.5 Real-World Comprehension", + "text_level": 1, + "bbox": [ + 135, + 643, + 375, + 660 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We evaluate the InternVL3 series on four real-world comprehension benchmarks—RealWorldQA [27], MME-RealWorld [151], WildVision [86], and R-Bench [62]—to assess its ability to tackle realistic and complex tasks. As shown in Table 4, even the smallest variant in the InternVL3 family (InternVL3-1B) demonstrates promising performance with a RealWorldQA score of 58.2, an MME-RealWorld score of 46.0, a WildVision win rate of 43.8, and an R-Bench score of 60.4. Scaling up the model yields further enhancements across all metrics. Mid-sized variants such as InternVL3-8B and InternVL3-14B continue this positive trend, with InternVL3-8B reporting a RealWorldQA score of 70.8 and an R-Bench score of 74.1. These improvements highlight the effectiveness of scaling, as larger models provide more robust representations and enhanced comprehension capabilities in real-world scenarios.", + "bbox": [ + 133, + 683, + 861, + 808 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "At the higher end of the scale, the InternVL3-38B and InternVL3-78B models achieve top-tier results among the InternVL3 series. 
Notably, InternVL3-78B records a RealWorldQA score of 78.0, an MME-RealWorld score of 65.4, a WildVision win rate of 73.6, and an R-Bench score of 77.4. When compared with competitive models, such as GPT-4o [97]—which scores 75.4 on RealWorldQA and 80.6 on WildVision—the InternVL3 series exhibits competitive strengths. InternVL3-78B not only surpasses GPT-4o on RealWorldQA and closely matches its R-Bench performance but also considerably outperforms it on MME-RealWorld, indicating an overall robust performance on tasks demanding both perceptual precision and comprehensive understanding.", + "bbox": [ + 133, + 813, + 861, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/4dc344ceba0082d124ada389f229dd7aa3fbe789254ac6aab91cf6b16f3c9dcf.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model NameMME (sum)MMB (EN / CN)MMBv1.1 (EN)MMVet (turbo)MMVet2 (0613)MMStarOverallHallBench (avg)MMHal (score)CRPE (relation)POPE (avg)Overall
LLaVA-OneVision-0.5B [60]1438.061.6 / 55.559.632.2-37.7-27.9----
InternVL2-1B [19]1794.465.4 / 60.761.632.736.145.751.734.02.2557.587.345.3
InternVL2.5-1B [18]1950.570.7 / 66.368.448.843.250.158.939.02.4960.989.948.1
InternVL3-1B1934.472.6 / 67.969.959.547.551.561.941.42.5964.090.749.7
Qwen2-VL-2B [121]1872.074.9 / 73.572.249.5-48.0-41.7----
Qwen2.5-VL-3B [6]215779.1 / 78.177.461.8-55.9-46.3-73.6--
InternVL2-2B [19]1876.873.2 / 70.970.239.539.650.158.037.92.5266.388.348.8
InternVL2.5-2B [18]2138.274.7 / 71.972.260.852.353.765.342.62.9470.290.651.6
InternVL3-2B2221.281.1 / 78.478.662.253.960.769.842.53.2671.589.651.7
Qwen2-VL-7B [121]2326.883.0 / 80.580.762.0-60.7-50.63.4074.488.154.1
Qwen2.5-VL-7B [6]234783.5 / 83.482.667.1-63.9-52.9-76.4--
MiniCPM-V2.6 [135]2348.481.5 / 79.378.060.0-57.5-48.13.6075.287.353.6
InternVL2-8B [19]2210.381.7 / 81.279.554.252.362.069.245.23.3375.886.952.8
InternVL2.5-8B [18]2344.184.6 / 82.683.262.858.162.873.250.13.6578.490.655.7
InternVL3-8B2415.483.4 / 82.281.781.366.368.277.749.93.6176.391.155.2
InternVL3-9B2372.883.4 / 82.281.776.265.466.376.351.23.4775.090.455.0
InternVL3-14B2478.385.6 / 84.183.580.268.468.879.055.13.4977.390.256.5
InternVL-Chat-V1.5 [19]2194.282.2 / 82.080.361.551.557.369.750.33.1175.488.454.3
InternVL2-26B [19]2260.783.4 / 82.081.562.157.261.271.850.73.5575.688.054.5
InternVL2.5-26B [18]2373.385.4 / 85.584.265.060.866.575.255.03.7079.190.657.1
Cambrian-34B [116]-80.4 / 79.278.353.2-54.2-41.6----
InternVL2-40B [19]2307.586.8 / 86.585.165.563.865.475.756.93.7577.688.456.7
InternVL2.5-38B [18]2455.886.5 / 86.385.568.862.167.977.056.83.7178.390.757.4
InternVL3-38B2523.687.6 / 86.886.983.969.671.581.557.13.7777.190.657.1
GPT-4V [97]1926.681.0 / 80.280.067.566.356.070.746.5----
GPT-4o-20240513 [97]-83.4 / 82.183.169.171.064.7-55.04.0076.686.955.6
Claude-3-Opus [3]1586.863.3 / 59.260.151.755.845.755.537.8----
Claude-3.5-Sonnet [3]-82.6 / 83.580.970.171.865.1-55.5----
Gemini-1.5-Pro [102]-73.9 / 73.874.664.066.959.1-45.6----
LLaVA-OneVision-72B [60]2261.085.8 / 85.385.060.6-65.8-49.0----
Qwen2-VL-72B [121]2482.786.5 / 86.685.974.066.968.378.758.1----
Qwen2.5-VL-72B [6]2448.088.6 / 87.988.476.2-70.8-55.2-79.2--
InternVL2-Llama3-76B [19]2414.786.5 / 86.385.565.768.467.477.255.23.8377.689.056.4
InternVL2.5-78B [18]2494.588.3 / 88.587.472.365.569.579.257.43.8978.890.857.7
InternVL3-78B2549.889.0 / 88.787.781.370.072.582.059.13.8579.290.358.1
", + "bbox": [ + 138, + 87, + 859, + 536 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 5: Comparison of comprehensive multimodal understanding and hallucination performance. Comprehensive multimodal benchmarks include MME [37], MMBench series [75], MMVet series [138, 139], and MMStar [13]. Hallucination benchmarks encompass HallusionBench [45], MMHal [111], CRPE [126], and POPE [67]. Part of the results are sourced from the benchmark papers and the OpenCompass leaderboard [26].", + "bbox": [ + 133, + 542, + 859, + 599 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "3.6 Comprehensive Multimodal Evaluation", + "text_level": 1, + "bbox": [ + 135, + 666, + 452, + 681 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The comprehensive multimodal evaluation is based on established benchmarks including MME [37], MMBench (evaluating both English and Chinese tasks) [75], MMBench v1.1 (English) [75], MMVet [138], MMVet v2 [139], and MMStar [13], as summarized in Table 5. Specifically, InternVL3-1B achieves an MMBench score of 72.6/67.9 (English/Chinese) and improves the MMBench v1.1 score to 69.9, compared to the InternVL2.5-1B baseline (70.7/66.3 and 68.4, respectively). The improvements become more pronounced at the 2B scale, where InternVL3-2B records an MME of 2221.2 and reaches an MMBench performance of 81.1/78.4, along with an MMBench v1.1 score of 78.6.", + "bbox": [ + 133, + 710, + 861, + 808 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "At larger scales, InternVL3 models consistently demonstrate superior performance. For example, the InternVL3-8B model achieves an MME of 2415.4, while the InternVL3-38B and InternVL3-78B models record MME scores of 2523.6 and 2549.8, respectively. The corresponding MMBench and MMBench v1.1 scores also show steady improvements, with InternVL3-78B attaining 89.0/88.7 for English/Chinese and 87.7 for English-only tasks. 
When compared with other competitive models, such as Qwen2-VL-72B and Qwen2.5-VL-72B, the InternVL3 series—especially the 78B variant—offers a consistent performance advantage on the multimodal understanding benchmarks.", + "bbox": [ + 133, + 813, + 864, + 912 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/4176fc7555918689b16ab5d68173c5f141920017a405d4eef3a48bf0f90258c7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model NameRefCOCORefCOCO+RefCOCOg
valtest-Atest-Bvaltest-Atest-Bvaltest
Grounding-DINO-L [74]90.693.288.282.889.075.986.187.086.6
UNINEXT-H [133]92.694.391.585.289.679.888.789.488.9
ONE-PEACE [122]92.694.289.388.892.283.289.289.389.8
Qwen2.5-VL-3B [6]89.191.784.082.488.074.185.285.785.0
InternVL3-1B85.890.181.776.684.169.282.882.681.6
InternVL3-2B89.892.686.484.089.276.587.687.286.7
Shikra-7B [12]87.090.680.281.687.472.182.382.282.9
Ferret-v2-13B [144]92.695.088.987.492.181.489.490.089.6
CogVLM-Grounding [123]92.894.889.088.792.983.489.890.890.3
MM1.5 [143]-92.586.7-88.777.8-87.1-
Qwen2-VL-7B [121]91.793.687.385.890.579.587.387.887.9
Qwen2.5-VL-7B [6]90.092.585.484.289.176.987.287.286.6
TextHawk2 [140]91.993.087.686.290.080.488.288.188.2
InternVL2-8B [19]87.191.180.779.887.971.482.782.782.9
InternVL2.5-8B [18]90.394.585.985.291.578.886.787.687.6
InternVL3-8B92.594.688.088.292.581.889.690.089.6
InternVL3-9B91.893.286.686.491.079.988.088.588.2
InternVL3-14B92.094.487.887.492.181.588.689.389.1
Qwen2-VL-72B [121]93.295.390.790.193.885.689.990.491.1
Qwen2.5-VL-72B [6]92.794.689.788.992.283.789.990.390.3
InternVL2-Llama3-76B [19]92.294.888.488.893.182.889.590.390.0
InternVL2.5-78B [18]93.795.692.590.494.786.992.792.292.3
InternVL3-38B93.295.190.289.893.285.291.491.591.2
InternVL3-78B93.495.490.390.193.885.391.591.591.4
", + "bbox": [ + 158, + 87, + 834, + 402 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 6: Comparison of visual grounding performance. We evaluate InternVL's visual grounding capability on RefCOCO, RefCOCO+, and RefCOCOg datasets [56, 88]. Parts of the results are collected from [121].", + "bbox": [ + 135, + 409, + 859, + 439 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "3.7 Multimodal Hallucination Evaluation", + "text_level": 1, + "bbox": [ + 135, + 465, + 439, + 481 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We evaluate InternVL's propensity for hallucinations on four established benchmarks—HallusionBench [45], MMHal-Bench [111], CRPE [126], and POPE [67]—as detailed in Table 5. In comparison with previous InternVL series, the new InternVL3 models demonstrate overall competitive performance across varying scales, while providing consistent improvements in handling multimodal hallucination challenges. In the small-parameter regime, InternVL3-1B attains a HallusionBench score of 41.4, representing an appreciable gain over the InternVL2.5-1B baseline, which scored 39.0. Similarly, the 2B variant of InternVL3 shows a comparable HallusionBench performance (42.5) to its InternVL2.5 counterpart (42.6), while registering a modest improvement in CRPE performance (71.5 vs. 70.2).", + "bbox": [ + 133, + 492, + 861, + 604 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In the large-scale setting, InternVL3-38B and InternVL3-78B are particularly noteworthy. InternVL3-38B obtains a HallusionBench score of 57.1, while InternVL3-78B reaches 59.1, accompanied by a CRPE improvement to 79.2. These figures position the InternVL3 series as competitive with leading closed- and open-source models such as GPT-4o and the Qwen2.5-VL series. 
Despite these advancements, minor declines on certain benchmarks, such as MMHal, indicate that although the InternVL3 series has made overall progress, optimizing data and training strategies to achieve more consistent improvements remains an important direction for future work.", + "bbox": [ + 133, + 609, + 861, + 695 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "3.8 Visual Grounding", + "text_level": 1, + "bbox": [ + 135, + 712, + 303, + 728 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We evaluate InternVL's visual grounding capability on the RefCOCO [56], RefCOCO+ [56], and RefCOCOg [88] datasets, where the model is tasked with accurately localizing target objects in images from given textual descriptions. Table 6 shows a comprehensive comparison across various models, including several specialized grounding models as well as multiple MLLMs.", + "bbox": [ + 133, + 738, + 859, + 796 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Among the smaller-scale models, we observe that while Qwen2.5-VL-3B achieves an average score of 85.0, the InternVL3-1B and InternVL3-2B models yield average scores of 81.6 and 86.7, respectively. Notably, when scaling up, the InternVL3 series exhibits promising improvements. InternVL3-8B, InternVL3-9B, and InternVL3-14B yield average scores around 88.2–89.6, reflecting a consistent trend of performance gains as the model size increases. However, when reaching larger scales, the performance gains appear to plateau. For instance, InternVL2.5-78B reaches an average score of 92.3, and InternVL3-78B only shows a score of 91.4. 
We speculate that this is because InternVL3's training data expansion does not include additional grounding-specific data and the relative reduction in grounding-targeted data could have restricted the localization capabilities.", + "bbox": [ + 133, + 800, + 861, + 912 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/38f44bcbaf1d9cf8b391869abd3803e6dde210f593578e499d64e4125c709616.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model NameMMMBMultilingual MMBenchMTVQA (avg)Overall
enzhptartrruenzhptartrru
InternVL2-1B [19]73.267.455.553.543.855.267.961.250.843.331.852.712.640.7
InternVL2.5-1B [18]78.870.261.555.045.361.172.564.757.043.037.853.221.446.0
InternVL3-1B79.470.162.358.047.661.972.666.262.348.039.560.322.247.9
Qwen2-VL-2B [121]78.374.272.668.361.872.872.171.169.961.154.469.320.052.6
Qwen2.5-VL-3B [6]------------24.8-
InternVL2-2B [19]79.471.654.043.546.448.173.869.651.429.831.342.310.939.3
InternVL2.5-2B [18]81.474.458.248.346.453.276.571.655.937.333.944.821.845.2
InternVL3-2B81.978.375.468.662.974.681.377.875.966.459.570.726.757.4
mPLUG-Owl2 [136]67.361.059.745.845.462.666.259.458.237.947.760.4--
Qwen2-VL-7B [121]83.982.481.279.074.782.481.881.679.175.674.579.325.661.6
Qwen2.5-VL-7B [6]------------29.2-
InternVL2-8B [19]83.481.576.166.369.275.782.981.876.060.566.074.420.956.6
InternVL2.5-8B [18]84.383.178.669.371.579.583.883.279.464.367.877.327.660.4
InternVL3-8B85.183.182.581.676.283.485.585.683.279.275.982.630.264.7
InternVL3-9B84.883.780.669.968.580.886.585.279.164.368.379.127.160.7
InternVL3-14B85.784.783.183.779.383.686.785.883.281.180.783.831.666.2
InternVL-Chat-V1.5 [19]82.680.876.365.268.674.081.180.276.956.266.771.020.555.7
InternVL2-26B [19]83.881.778.068.869.376.382.781.877.861.969.674.417.756.2
InternVL2.5-26B [18]86.283.881.673.373.782.886.185.580.767.575.079.628.562.6
InternVL2-40B [19]85.384.181.170.374.281.486.285.882.864.074.281.820.659.7
InternVL2.5-38B [18]86.485.184.184.382.884.987.588.685.384.584.085.931.767.4
InternVL3-38B86.785.684.584.882.685.189.089.387.184.684.387.432.468.1
GPT-4V [97]75.074.271.573.569.073.177.674.472.572.370.574.822.056.1
GPT-4o [97]------------27.8-
Gemini-1.0-Pro [114]75.071.970.669.969.672.773.672.170.361.169.870.5--
Qwen2-VL-72B [121]86.885.385.284.884.285.386.987.285.883.584.485.330.967.2
Qwen2.5-VL-72B [6]------------31.7-
InternVL2-Llama3-76B [19]85.385.182.882.883.083.787.887.385.983.185.085.722.063.9
InternVL2.5-78B [18]86.385.685.184.883.185.490.089.787.483.384.986.331.968.0
InternVL3-78B87.286.685.586.584.686.189.490.388.786.186.688.132.568.9
", + "bbox": [ + 138, + 88, + 859, + 474 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 7: Comparison of multimodal multilingual performance. We evaluate multilingual capabilities across 3 benchmarks, including MMMB [109], Multilingual MMBench [109] and MTVQA [113]. The languages evaluated are English (en), Chinese (zh), Portuguese (pt), Arabic (ar), Turkish (tr), and Russian (ru).", + "bbox": [ + 135, + 483, + 859, + 526 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "3.9 Multimodal Multilingual Understanding", + "text_level": 1, + "bbox": [ + 135, + 561, + 459, + 577 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We assess InternVL's multimodal multilingual understanding capabilities using benchmarks—MMMB, Multilingual MMBench [109], and MTVQA [113]—as shown in Table 7. The InternVL3 series demonstrates consistent improvements in multilingual performance compared to previous predecessors. For example, the lightweight InternVL3-1B already shows a modest improvement over InternVL2.5-1B, while the larger-scale variants, such as InternVL3-38B and InternVL3-78B, achieve significantly higher average scores across all three benchmarks.", + "bbox": [ + 133, + 590, + 861, + 662 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Comparisons with other leading models further highlight the effectiveness of the InternVL3 series. Notably, the InternVL3 variants achieve performance that is competitive with or superior to models such as Qwen2-VL-72B [121] and Qwen2.5-VL-72B [6]. 
Overall, the enhanced performance of the InternVL3 series across MMMB, Multilingual MMBench, and MTVQA underscores the promise of our approach in advancing global multimodal applications.", + "bbox": [ + 133, + 667, + 861, + 738 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "3.10 Video Understanding", + "text_level": 1, + "bbox": [ + 135, + 763, + 334, + 779 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Video understanding is essential for evaluating how well MLLMs capture temporal and multimodal cues in complex video content. In this work, we assess the InternVL3 series on six established benchmarks—Video-MME [38], MVBench [65], MMBench-Video [35], MLVU [154], LongVideoBench [129], and CG-Bench [2], as detailed in Table 8.", + "bbox": [ + 133, + 792, + 861, + 849 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Overall, the InternVL3 models demonstrate clear performance improvements and a strong scalability trend over their predecessors. As the model capacity increases, the performance gains become more pronounced. For instance, InternVL3-2B records higher Video-MME scores (58.9/61.4) and improved MVBench and MLVU performance compared to the earlier 2B variants.", + "bbox": [ + 133, + 854, + 859, + 912 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/63b137b26d089da6496b7caf50f6a13a7945a49735feef9c70d2644edfc567f2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model NameVideo-MME (wo / w sub)MVBenchMMBench-Video (val)MLVU (M-Avg)LongVideoBench (val total)CG-Bench (long / clue acc.)Overall
InternVL2-1B [19]42.9 / 45.457.51.1451.643.3--
InternVL2.5-1B [18]50.3 / 52.364.31.3657.347.9--
InternVL3-1B51.0 / 53.063.11.353.048.124.8 / 39.146.9
Qwen2-VL-2B [121]55.6 / 60.463.2-----
Qwen2.5-VL-3B [7]61.5 / 67.667.01.6368.243.3--
InternVL2-2B [19]46.2 / 49.160.21.3054.346.0--
InternVL2.5-2B [18]51.9 / 54.168.81.4461.452.0--
InternVL3-2B58.9 / 61.470.41.4264.255.430.8 / 50.754.9
VideoChat2-HD [64]45.3 / 55.762.31.2247.9---
MiniCPM-V-2.6 [135]60.9 / 63.6-1.70-54.9--
LLaVA-OneVision-7B [60]58.2 / -56.7-----
Qwen2-VL-7B [121]63.3 / 69.067.01.44-55.6--
Qwen2.5-VL-7B [7]65.1 / 71.669.61.7970.245.3--
InternVL2-8B [19]56.3 / 59.365.81.5764.054.6--
InternVL2.5-8B [18]64.2 / 66.972.01.6868.960.0--
InternVL3-8B66.3 / 68.975.41.6971.458.838.6 / 55.261.4
InternVL3-9B66.7 / 68.974.31.6970.862.541.1 / 58.062.3
InternVL3-14B70.4 / 73.076.61.7373.363.944.1 / 60.664.9
InternVL2-26B [19]57.0 / 60.267.51.6764.256.1--
InternVL2.5-26B66.9 / 69.275.21.8672.359.9--
Oryx-1.5-32B [78]67.3 / 74.970.11.5272.3---
Qwen2.5-VL-32B [7]70.5 / 77.9-1.93----
VILA-1.5-40B [71]60.1 / 61.1-1.6156.7---
InternVL2-40B [19]66.1 / 68.672.01.7871.060.6--
InternVL2.5-38B [18]70.7 / 73.174.41.8275.363.3--
InternVL3-38B72.7 / 75.076.91.8177.867.346.9 / 62.867.5
GPT-4V/4T [1]59.9 / 63.343.71.5349.259.1--
GPT-4o-20240513 [97]71.9 / 77.2-1.6364.666.7--
GPT-4o-20240806 [97]--1.87--41.8 / 58.3-
Gemini-1.5-Pro [102]75.0 / 81.3-1.30-64.040.1 / 56.4-
VideoLLaMA2-72B [23]61.4 / 63.162.0-----
LLaVA-OneVision-72B [60]66.2 / 69.559.4-66.461.3--
Qwen2-VL-72B [121]71.2 / 77.873.61.70--41.3 / 56.2-
Qwen2.5-VL-72B [7]73.3 / 79.170.42.0274.660.7--
InternVL2-Llama3-76B [19]64.7 / 67.869.61.7169.961.1--
InternVL2.5-78B [18]72.1 / 74.076.41.9775.763.642.2 / 58.566.0
InternVL3-78B72.7 / 75.778.71.8179.565.748.4 / 65.368.3
", + "bbox": [ + 140, + 87, + 854, + 561 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 8: Comparison of video understanding performance. We evaluate InternVL's video understanding capabilities across 6 benchmarks. For Video-MME [38], MMBench-Video [35], MLVU [154], and LongVideoBench [129], we test with four different settings: 16, 32, 48, and 64 frames, and report the maximum results. For MVBench [65], we conduct testing using 16 frames. For CG-Bench [2], we use 32 frames.", + "bbox": [ + 133, + 566, + 859, + 625 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The scaling behavior of the InternVL3 series is further evident in the larger models. InternVL3-14B attains a Video-MME score of 70.4/73.0, while InternVL3-38B and InternVL3-78B push these metrics even higher, reaching scores of 72.7/75.0 and 72.7/75.7, respectively. Additionally, the inclusion of CG-Bench evaluations for the InternVL3 series provides further insight into long-range video reasoning, with performance steadily improving as model size increases—for example, InternVL3-78B attains 48.4/65.3 on CG-Bench.", + "bbox": [ + 133, + 651, + 859, + 722 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "When compared with other open-source models, the InternVL3 series demonstrates competitive advantages. For instance, while Qwen2.5-VL models achieve impressive results (with Qwen2.5-VL-72B scoring 73.3/79.1 on Video-MME), the InternVL3 series tends to outperform them in other metrics, such as MVBench and MLVU. 
Similarly, while closed-source systems like Gemini-1.5-Pro sometimes yield superior results on select benchmarks (e.g., Video-MME), the overall performance of InternVL3, especially at larger scales, is highly competitive.", + "bbox": [ + 133, + 727, + 859, + 811 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "3.11 GUI Grounding", + "text_level": 1, + "bbox": [ + 135, + 829, + 299, + 844 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "GUI grounding requires precise localization and understanding of interface elements, which is critical for applications like automated UI testing and assistive technologies. In Table 9, we report the performance on GUI grounding benchmarks, comparing InternVL3 with state-of-the-art multimodal and GUI-specific models. The results demonstrate that InternVL3 achieves competitive performance across different scales. On", + "bbox": [ + 133, + 854, + 861, + 912 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/e5d9a7d6e77380d50286b1398b5cb57b706a8a9a17883f62629000da5c604ef9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodGPT-4oGemini 2.0ClaudeAguvis-72BQwen2.5-VL-72BUI-TARS-72BInternVL3-8B-38B-72B
ScreenSpot18.184.083.089.287.188.479.585.688.7
ScreenSpot-V2-----90.381.488.390.9
", + "bbox": [ + 140, + 89, + 854, + 143 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/64d16e3afefe4dacafd737d1aa038b03d98fb9ed727577c0deda1cb366e1d542.jpg", + "table_caption": [ + "Table 9: Performance of InternVL3 and other models on GUI grounding benchmarks." + ], + "table_footnote": [], + "table_body": "
Model NameObj.countAbs.Dist.Obj.sizeRoom SizeRel.Dist.Rel.Dir.Route PlanAppr.OrderOverall
GPT-4o [97]46.25.343.838.237.041.331.528.534.0
Gemini-1.5 Pro [102]56.230.964.143.651.346.336.034.645.4
VILA-1.5-8B [71]17.421.850.318.832.134.831.024.828.9
LongVA-7B [145]38.016.638.922.233.143.325.415.729.2
LLaVA-NeXT-Video-7B [150]48.514.047.824.243.542.434.030.635.6
LLaVA-OneVision-7B [60]47.720.247.412.342.535.229.424.432.4
InternVL3-8B68.139.048.433.648.336.427.335.442.1
InternVL3-38B71.750.246.141.753.538.628.960.748.9
LLaVA-NeXT-Video-72B [150]48.922.857.435.342.436.735.048.640.9
LLaVA-OneVision-72B [60]43.523.957.637.542.539.932.544.640.2
InternVL3-78B71.253.744.439.555.939.528.954.548.4
", + "bbox": [ + 138, + 178, + 854, + 325 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 10: Performance of InternVL3 and other models on VSI-Bench.", + "bbox": [ + 253, + 332, + 741, + 347 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "ScreenSpot [22], InternVL3-72B achieves $88.7\\%$ accuracy, slightly outperforming UI-TARS-72B [100] $(88.4\\%)$ and Qwen2.5-VL-72B $(87.1\\%)$ , while Aguvis-72B [132] leads with $89.2\\%$ . Notably, InternVL3-38B $(85.6\\%)$ surpasses GPT-4o $(18.1\\%)$ and Gemini 2.0 $(84.0\\%)$ by a significant margin.", + "bbox": [ + 133, + 375, + 861, + 419 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For the more challenging ScreenSpot-V2 [130] benchmark, InternVL3 exhibits strong scaling behavior: InternVL3-72B achieves $90.9\\%$ , outperforming UI-TARS-72B $(90.3\\%)$ . The 8B variant $(81.4\\%)$ already surpasses UI-TARS-72B, while the 38B model $(88.3\\%)$ further closes the gap to the 72B version. These results highlight InternVL3's robustness in GUI understanding tasks, particularly in handling complex screen layouts and dynamic interfaces. The performance improvements with model scale suggest that larger architectures better capture the fine-grained visual-textual alignments required for precise GUI grounding. The superior performance of the InternVL3 models highlights their robustness in interpreting complex visual layouts. Future work will explore extending these capabilities to more dynamic and interactive GUI environments.", + "bbox": [ + 133, + 422, + 861, + 536 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "3.12 Spatial Reasoning", + "text_level": 1, + "bbox": [ + 135, + 551, + 313, + 566 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Spatial reasoning involves constructing a mental representation of a three-dimensional environment from visual inputs—a capability that is vital for applications such as autonomous driving. 
Table 10 reports the performance results on the Visual-Spatial Intelligence Benchmark (VSI-Bench) [134], where InternVL3 is compared against other state-of-the-art MLLMs. The results clearly indicate that InternVL3 outperforms its competitors in spatial reasoning tasks. In particular, the InternVL3-8B variant achieves a score of 42.1, leading all open-source MLLMs in the benchmark. Moreover, the InternVL3-38B and InternVL3-78B variants score 48.9 and 48.4, respectively—both superior to proprietary models such as GPT-4o, Gemini-1.5 Flash, and Gemini-1.5 Pro.", + "bbox": [ + 133, + 577, + 861, + 676 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Furthermore, InternVL3 exhibits exceptional performance in several sub-category tasks within the benchmark. It attains a score of 71.2 in object counting, 53.7 in absolute distance estimation, 55.9 in relative distance estimation, and 54.5 in appearance order prediction, demonstrating its robust spatial reasoning capabilities. These promising results underscore the potential of InternVL3 for advancing 3D scene understanding, and future work will explore its integration into various downstream applications.", + "bbox": [ + 133, + 680, + 861, + 752 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "3.13 Evaluation on Language Capability", + "text_level": 1, + "bbox": [ + 135, + 767, + 433, + 782 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 11 presents the performance evaluation of language capabilities across a diverse array of benchmarks. 
These benchmarks cover comprehensive assessments in general knowledge, linguistic understanding, reasoning, mathematics, and coding tasks, such as MMLU [46], CMMLU [63], C-Eval [48], GAOKAO-Bench [149], TriviaQA [52], NaturalQuestions [58, 110], RACE [59], WinoGrande [103], HellaSwag [142], BigBench Hard [112], GSM8K-Test [25], MATH [47], TheoremQA [17], HumanEval [14], MBPP [4], and MBPP-CN [4].", + "bbox": [ + 133, + 792, + 861, + 866 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In particular, the experiments conducted compare the performance of Qwen2.5 chat models against corresponding InternVL3 variants. Both model series share the same pre-trained Qwen2.5 base model as their initialization. After undergoing native multimodal pre-training followed by additional post-training, the In", + "bbox": [ + 133, + 869, + 861, + 912 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/f5286d02710ce84555dfeebe50c2c99b828fdae3c8f2d0f77f6a37f2af63c8b8.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetVersionQwen2.5-0.5B ChatQwen2.5-1.5B ChatQwen2.5-7B ChatQwen2.5-14B ChatQwen2.5-32B ChatQwen2.5-72B Chat
InterVL3-1BInterVL3-2BInterVL3-8BInterVL3-14BInterVL3-38BInterVL3-78B
MMLU4d595a46.449.861.864.874.277.379.582.183.385.484.486.9
CMMLUc1336547.256.762.972.278.884.482.685.885.888.787.489.9
C-Eval2daf2453.559.066.273.377.884.581.485.686.589.288.189.5
GAOKAO4c31db30.946.653.767.781.389.586.991.290.893.591.093.1
TriviaQA2121ce24.221.539.841.255.851.565.167.465.870.174.074.7
NaturalQuestions3dceal8.28.515.215.917.928.219.731.419.731.023.839.0
C38c358f35.266.381.284.790.895.192.196.392.397.496.197.6
RACE-High69ee4f51.568.876.084.686.890.889.693.091.594.291.794.2
WinoGrandeb3677047.252.956.561.971.578.179.184.383.886.783.987.8
HellaSwage4271039.347.062.073.885.490.290.593.092.195.592.795.6
BBH5b92b021.534.539.752.065.777.473.082.585.587.785.485.2
GSM8K1d7fe439.047.261.672.580.183.182.488.484.789.788.290.5
MATH39342427.832.749.357.372.672.273.776.381.172.281.478.9
TheoremQA6f0af812.312.914.415.620.125.518.524.121.918.922.930.4
HumanEval8e312c27.439.051.862.882.378.181.178.189.087.887.282.3
MBPPa447ff38.547.551.460.774.369.376.775.183.777.486.876.7
MBPP-CN9114d519.630.634.445.864.464.475.467.277.875.476.076.0
Overall-33.542.451.659.269.472.973.476.677.478.978.980.5
", + "bbox": [ + 174, + 85, + 818, + 429 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/a882683b95d10eac481fb7abb163436df7ccda6ca735de98d6fb0212917f3480.jpg", + "table_caption": [ + "Table 11: Comparison of language model performance across multiple benchmarks. These results were obtained using the OpenCompass toolkit. We compare InternVL3 with Qwen2.5 Chat models, whose corresponding pre-trained base models are employed as the initialization of the language component in InternVL3. Please note that the evaluation scores of the Qwen2.5 series may differ from those officially reported, as we have adopted the prompt versions provided in the table across all datasets for OpenCompass evaluation." + ], + "table_footnote": [], + "table_body": "
V2PEδTextVQA valVizWiz valChartQA test avgDocVQA valAI2D testInfoVQA valGQA testSQA-I testPOPETiny LVLMMMMU valSEED v1 imageOverall
X-78.461.781.489.481.169.460.894.487.9348.552.675.675.2
1/25678.061.781.288.581.067.761.094.488.3345.352.975.975.0
1/6478.362.081.789.481.369.660.994.788.3345.752.376.175.3
1/1678.762.181.790.481.670.461.195.088.2345.053.376.175.6
1/479.062.282.491.081.871.761.294.988.1345.852.676.275.9
1/178.761.782.290.281.771.461.294.688.5347.252.476.175.7
", + "bbox": [ + 138, + 545, + 859, + 665 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 12: Performance of the pre-trained InternVL3-8B model on multimodal benchmarks with different positional encoding strategies. When employing V2PE, the impact of different positional increment values $\\delta$ is systematically evaluated.", + "bbox": [ + 138, + 671, + 859, + 713 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "ternVL3 series consistently demonstrates superior performance over the Qwen2.5 chat models across most evaluation benchmarks.", + "bbox": [ + 138, + 766, + 857, + 792 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "This observed enhancement in language capabilities primarily arises from several factors, including the integration of approximately $25\\%$ pure-language data, joint parameter optimization during native multimodal pre-training, and the extensive use of high-quality textual corpora during the subsequent post-training stage. Such an approach not only strengthens multimodal comprehension but also significantly enhances language proficiency. Consequently, even when derived from identical pre-trained base models, the integrated multimodal and pure-text training strategy employed by InternVL3 results in substantially improved performance in language capabilities compared to the specialized training pipeline designed for pure-text tasks used by the Qwen2.5 chat models.", + "bbox": [ + 138, + 800, + 859, + 910 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 491, + 935, + 506, + 946 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/ec927fbd80730a141c1d6b95f58d4e1e79c5b82d743ca43baae18e32892499e3.jpg", + "image_caption": [ + "Figure 3: Performance comparison on multimodal benchmarks under different training strategies. Native multimodal pre-training endows MLLMs with strong multimodal capabilities, even without further post-training." 
+ ], + "image_footnote": [], + "bbox": [ + 140, + 92, + 854, + 359 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/161d8384d9e1a45f1bfa6d5a124a923c74d772f4c9ab302fb98daff5df6aad86.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelMPOMMMUMathVistaMathVisionMathVerseDynaMathWeMathLogicVistaOverall
InternVL3-1B43.447.213.818.14.214.731.124.6
43.445.818.818.75.813.429.825.1 (+0.5)
InternVL3-2B49.159.022.023.213.418.130.030.7
48.657.021.725.314.622.436.932.4 (+1.7)
InternVL3-8B61.967.424.736.922.832.743.241.4
62.771.629.339.825.537.144.144.3 (+2.9)
InternVL3-9B59.068.828.932.223.032.546.541.6
57.771.527.635.326.733.849.243.1 (+1.5)
InternVL3-14B67.170.531.238.827.938.149.946.2
67.175.137.244.431.343.051.249.9 (+3.7)
InternVL3-38B69.371.234.245.122.241.754.448.3
70.175.134.248.235.348.658.452.8 (+4.5)
InternVL3-78B72.274.035.244.231.742.553.550.5
72.279.043.151.035.146.155.954.6 (+4.1)
", + "bbox": [ + 140, + 415, + 857, + 612 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 13: Comparison of reasoning abilities before and after Mixed Preference Optimization (MPO).", + "bbox": [ + 143, + 618, + 844, + 633 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "3.14 Ablation Study", + "text_level": 1, + "bbox": [ + 135, + 662, + 290, + 678 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The Effectiveness of Native Multimodal Pre-Training. To assess the effectiveness of native multimodal pre-training, we conduct experiments on the InternVL2-8B model while keeping its architecture, initialization parameters, and training data entirely unchanged. Traditionally, InternVL2-8B employs a training pipeline that begins with an MLP warmup phase for multimodal alignment, followed by an instruction-tuning stage. In our experiments, we substitute the conventional MLP warmup phase with our native multimodal pre-training process. This modification isolates the contribution of native multimodal pre-training to the overall multimodal capability of the model.", + "bbox": [ + 133, + 689, + 861, + 787 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The evaluation results in Figure 3 show that the model with native multimodal pre-training exhibits performance on most benchmarks that is comparable to the fully multi-stage-trained InternVL2-8B baseline. Furthermore, when followed by instruction tuning on higher-quality data, the model demonstrates further performance gains across evaluated multimodal tasks. These findings underscore the efficiency of native multimodal pre-training in imparting powerful multimodal capabilities to MLLMs.", + "bbox": [ + 133, + 792, + 861, + 864 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The Evaluation of Variable Visual Position Encoding. 
To promote the multimodal capabilities in long-context scenarios, InternVL3 employs Variable Visual Position Encoding (V2PE) in its visual embedding. However, in the original V2PE [42], this specialized positional encoding for visual tokens did not yield benefits on", + "bbox": [ + 133, + 869, + 861, + 912 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "multimodal tasks with moderate context lengths. To further explore the efficacy of V2PE in a broader setting, we incorporated it during the native multimodal pre-training stage and evaluated the InternVL3-8B pre-trained model on standard multimodal benchmarks.", + "bbox": [ + 133, + 90, + 864, + 133 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "As reported in Table 12, the introduction of V2PE leads to significant performance gains across most evaluation metrics. In addition, our ablation studies—by varying the positional increment $\\delta$ —reveal that even for tasks primarily involving short contexts, relatively small $\\delta$ values can achieve optimal performance. These findings provide important insights for future efforts aimed at refining position encoding strategies for visual tokens in MLLMs. It is important to note that, to ensure fair comparisons, all results elsewhere in this report maintain a fixed $\\delta = 1$ , except for the experimental results presented in Table 12.", + "bbox": [ + 133, + 138, + 859, + 223 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Mixed Preference Optimization. Here, we demonstrate the effectiveness of MPO. As shown in Table 13, models fine-tuned with MPO demonstrate superior reasoning performance across seven multimodal reasoning benchmarks compared to their counterparts without MPO. Specifically, InternVL3-78B and InternVL3-38B outperform their counterparts by 4.1 and 4.5 points, respectively. 
Notably, the training data used for MPO is a subset of that used for SFT, indicating that the performance improvements primarily stem from the training algorithm rather than the training data.", + "bbox": [ + 133, + 228, + 859, + 313 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "4 Conclusion", + "text_level": 1, + "bbox": [ + 135, + 334, + 264, + 349 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We have introduced InternVL3, a significant advancement in the InternVL series that implements a native multimodal pre-training paradigm. By jointly learning linguistic and multimodal capabilities during the pretraining phase, InternVL3 avoids the training complexities and optimization challenges typically associated with post-hoc MLLM training pipelines. Through the incorporation of variable visual position encoding (V2PE) for extended multimodal contexts, advanced post-training strategies—such as supervised fine-tuning and mixed preference optimization—and test-time scaling, InternVL3 establishes a new open-source benchmark across a wide range of multimodal tasks, while simultaneously preserving robust linguistic competencies. Notably, InternVL3-78B attains a 72.2-point score on the MMMU benchmark, exceeding previous open-source MLLMs and reducing the performance gap relative to leading proprietary counterparts (e.g., Gemini-2.5 Pro). 
In line with our commitment to fostering community-driven innovation in multimodal large language models, we will publicly release InternVL3's training data and model weights, thereby encouraging further research and development in this rapidly evolving field.", + "bbox": [ + 133, + 366, + 864, + 532 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 137, + 551, + 232, + 566 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 15", + "[2] Anonymous. CG-bench: Clue-grounded question answering benchmark for long video understanding. In Submitted to The Thirteenth International Conference on Learning Representations, 2024. under review. 14, 15", + "[3] Anthropic. The claude 3 model family: Opus, sonnet, haiku. https://www.anthropic.com, 2024. 2, 8, 9, 10, 11, 12", + "[4] Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021. 16", + "[5] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. 1, 2", + "[6] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 11, 12, 13, 14", + "[7] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2.5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 
1, 2, 9, 10, 15", + "[8] Loubna Ben Allal, Anton Lozhkov, Guilherme Penedo, Thomas Wolf, and Leandro von Werra. Smoll-m-corpus, 2024. 5" + ], + "bbox": [ + 151, + 575, + 859, + 909 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[9] Ali Furkan Biten, Ruben Tito, Andres Mafla, Lluis Gomez, Marçal Rusinol, Ernest Valveny, CV Jawahar, and Dimosthenis Karatzas. Scene text visual question answering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4291-4301, 2019. 6", + "[10] Jie Cao and Jing Xiao. An augmented benchmark dataset for geometric question answering through dual parallel text encoding. In Proceedings of the 29th International Conference on Computational Linguistics, pages 1511-1520, 2022. 6", + "[11] Shuaichen Chang, David Palzer, Jialin Li, Eric Fosler-Lussier, and Ningchuan Xiao. Mapqa: A dataset for question answering on choropleth maps. arXiv preprint arXiv:2211.08545, 2022. 6", + "[12] Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal lmm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 13", + "[13] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. 12", + "[14] Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374, 2021. 16", + "[15] Qiaoling Chen, Diandian Gu, Guoteng Wang, Xun Chen, YingTong Xiong, Ting Huang, Qinghao Hu, Xin Jin, Yonggang Wen, Tianwei Zhang, et al. 
Internevo: Efficient long-sequence large language model training via hybrid parallelism and redundant sharding. arXiv preprint arXiv:2401.09149, 2024. 2, 7", + "[16] Qiguang Chen, Libo Qin, Jin Zhang, Zhi Chen, Xiao Xu, and Wanxiang Che. M3cot: A novel benchmark for multi-domain multi-step multi-modal chain-of-thought. arXiv preprint arXiv:2405.16473, 2024. 6", + "[17] Wenhu Chen, Ming Yin, Max Ku, Pan Lu, Yixin Wan, Xueguang Ma, Jianyu Xu, Xinyi Wang, and Tony Xia. Theoremqa: A theorem-driven question answering dataset. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, EMNLP 2023, Singapore, December 6-10, 2023, pages 7889-7901. Association for Computational Linguistics, 2023. 16", + "[18] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271, 2024. 1, 2, 3, 5, 6, 9, 10, 11, 12, 13, 14, 15", + "[19] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 1, 3, 10, 11, 12, 13, 14, 15", + "[20] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 2, 3", + "[21] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024. 1, 2, 3", + "[22] Kanzhi Cheng, Qiushi Sun, Yougang Chu, Fangzhi Xu, Yantao Li, Jianbing Zhang, and Zhiyong Wu. Seeclick: Harnessing gui grounding for advanced visual gui agents. arXiv preprint arXiv:2401.10935, 2024. 16", + "[23] Zesen Cheng, Sicong Leng, Hang Zhang, Yifei Xin, Xin Li, Guanzheng Chen, Yongxin Zhu, Wenqi Zhang, Ziyang Luo, Deli Zhao, et al. Videollama 2: Advancing spatial-temporal modeling and audio understanding in video-llms. arXiv preprint arXiv:2406.07476, 2024. 15", + "[24] Christopher Clark and Matt Gardner. Simple and effective multi-paragraph reading comprehension. In Proceedings of the Annual Meeting of the Association for Computational Linguistics, pages 845–855, 2018. 6", + "[25] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. 16", + "[26] OpenCompass Contributors. Opencompass: A universal evaluation platform for foundation models. https://github.com/open-compass/opencompass, 2023. 9, 10, 11, 12" + ], + "bbox": [ + 145, + 90, + 861, + 912 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[27] X.AI Corp. Grok-1.5 vision preview: Connecting the digital and physical worlds with our first multimodal model. https://x.ai/blog/grok-1.5v, 2024.11", + "[28] Wenliang Dai, Nayeon Lee, Boxin Wang, Zhuolin Yang, Zihan Liu, Jon Barker, Tuomas Rintamaki, Moham-mad Shoeybi, Bryan Catanzaro, and Wei Ping. NvIm: Open frontier-class multimodal llms. arXiv preprint arXiv:2409.11402, 2024. 10", + "[29] Google Deepmind. Gemini 2.0 is now available to everyone. 
https://blog.google/technology/google-deepmind/gemini-model-updates-february-2025/, 202.9", + "[30] Google Deepmind. Introducing gemini 2.0: our new ai model for the agentic era. https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/, 2024.9", + "[31] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, et al. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024. 1, 10", + "[32] Xiaoyi Dong, Pan Zhang, Yuhang Zang, Yuhang Cao, Bin Wang, Linke Ouyang, Songyang Zhang, Haodong Duan, Wenwei Zhang, Yining Li, et al. Internlm-xcomposer2-4khd: A pioneering large vision-language model handling resolutions from 336 pixels to 4k hd. arXiv preprint arXiv:2404.06512, 2024. 1", + "[33] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multi-modality models. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 11198-11201, 2024. 7", + "[34] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. 10", + "[35] Xinyu Fang, Kangrui Mao, Haodong Duan, Xiangyu Zhao, Yining Li, Dahua Lin, and Kai Chen. Mmbench-video: A long-form multi-shot benchmark for holistic video understanding. arXiv preprint arXiv:2406.14515, 2024. 14, 15", + "[36] Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In Conference on Computer Vision and Pattern Recognition Workshop, pages 178-178, 2004. 
7", + "[37] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Zhenyu Qiu, Wei Lin, Jinrui Yang, Xiawu Zheng, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 12", + "[38] Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 14, 15", + "[39] Xingyu Fu, Yushi Hu, Bangzheng Li, Yu Feng, Haoyu Wang, Xudong Lin, Dan Roth, Noah A Smith, Wei-Chiu Ma, and Ranjay Krishna. Blink: Multimodal large language models can see but not perceive. arXiv preprint arXiv:2404.12390, 2024. 9, 11", + "[40] Jiahui Gao, Renjie Pi, Jipeng Zhang, Jiacheng Ye, Wanjun Zhong, Yufei Wang, Lanqing Hong, Jianhua Han, Hang Xu, Zhenguo Li, et al. G-llava: Solving geometric problem with multi-modal large language model. arXiv preprint arXiv:2312.11370, 2023. 6", + "[41] Zhangwei Gao, Zhe Chen, Erfei Cui, Yiming Ren, Weiyun Wang, Jinguo Zhu, Hao Tian, Shenglong Ye, Junjun He, Xizhou Zhu, et al. Mini-internvl: A flexible-transfer pocket multimodal model with $5\\%$ parameters and $90\\%$ performance. arXiv preprint arXiv:2410.16261, 2024. 3", + "[42] Junqi Ge, Ziyi Chen, Jintao Lin, Jinguo Zhu, Xihui Liu, Jifeng Dai, and Xizhou Zhu. V2pe: Improving multi-modal long-context capability of vision-language models with variable visual position encoding. arXiv preprint arXiv:2412.09616, 2024. 2, 3, 18", + "[43] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6904-6913, 2017. 
6", + "[44] Shuhao Gu, Jialing Zhang, Siyuan Zhou, Kevin Yu, Zhaohu Xing, Liangdong Wang, Zhou Cao, Jintao Jia, Zhuoyi Zhang, Yixuan Wang, et al. Infinity-mm: Scaling multimodal performance with large-scale and high-quality instruction data. arXiv preprint arXiv:2410.18558, 2024. 9, 10" + ], + "bbox": [ + 145, + 90, + 864, + 912 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[45] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: An advanced diagnostic suite for entangled language hallucination & visual illusion in large vision-language models. arXiv preprint arXiv:2310.14566, 2023. 8, 12, 13", + "[46] Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. Measuring massive multitask language understanding. In The International Conference on Learning Representations, 2020. 16", + "[47] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. In Joaquin Vanschoeren and Sai-Kit Yeung, editors, Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual, 2021. 16", + "[48] Yuzhen Huang, Yuzhuo Bai, Zhihao Zhu, Junlei Zhang, Jinghan Zhang, Tangjun Su, Junteng Liu, Chuancheng Lv, Yikai Zhang, Yao Fu, et al. C-eval: A multi-level multi-discipline chinese evaluation suite for foundation models. Advances in Neural Information Processing Systems, 36, 2024. 16", + "[49] Zheng Huang, Kai Chen, Jianhua He, Xiang Bai, Dimosthenis Karatzas, Shijian Lu, and CV Jawahar. Icdar2019 competition on scanned receiptOCR and information extraction. 
In 2019 International Conference on Document Analysis and Recognition (ICDAR), pages 1516-1520. IEEE, 2019. 6", + "[50] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6700–6709, 2019. 6", + "[51] Dongfu Jiang, Xuan He, Huaye Zeng, Cong Wei, Max Ku, Qian Liu, and Wenhu Chen. Mantis: Interleaved multi-image instruction tuning. arXiv preprint arXiv:2405.01483, 2024. 9, 11", + "[52] Mandar Joshi, Eunsol Choi, Daniel S Weld, and Luke Zettlemoyer. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. arXiv preprint arXiv:1705.03551, 2017. 16", + "[53] Seungjae Jung, Gunsoo Han, Daniel Wontae Nam, and Kyoung-Woon On. Binary classifier optimization for large language model alignment. arXiv preprint arXiv:2404.04656, 2024. 6", + "[54] Kushal Kafle, Brian Price, Scott Cohen, and Christopher Kanan. Dvqa: Understanding data visualizations via question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5648-5656, 2018. 6", + "[55] Mehran Kazemi, Hamidreza Alvari, Ankit Anand, Jialin Wu, Xi Chen, and Radu Soricut. Geomverse: A systematic evaluation of large models for geometric reasoning. arXiv preprint arXiv:2312.12241, 2023. 6", + "[56] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing, pages 787-798, 2014. 13", + "[57] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In European Conference on Computer Vision, pages 235-251, 2016. 
6, 7, 8, 10", + "[58] Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. Natural questions: a benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453-466, 2019. 16", + "[59] Guokun Lai, Qizhe Xie, Hanxiao Liu, Yiming Yang, and Eduard Hovy. Race: Large-scale reading comprehension dataset from examinations. arXiv preprint arXiv:1704.04683, 2017. 16", + "[60] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 9, 10, 11, 12, 15, 16", + "[61] Bohao Li, Yuying Ge, Yi Chen, Yixiao Ge, Ruimao Zhang, and Ying Shan. Seed-bench-2-plus: Benchmarking multimodal large language models with text-rich visual comprehension. arXiv preprint arXiv:2404.16790, 2024.8, 10", + "[62] Chunyi Li, Jianbo Zhang, Zicheng Zhang, Haoning Wu, Yuan Tian, Wei Sun, Guo Lu, Xiaohong Liu, Xiongkuo Min, Weisi Lin, et al. R-bench: Are your large multimodal model robust to real-world corruptions? arXiv preprint arXiv:2410.05474, 2024. 11", + "[63] Haonan Li, Yixuan Zhang, Fajri Koto, Yifei Yang, Hai Zhao, Yeyun Gong, Nan Duan, and Timothy Baldwin. Cmflu: Measuring massive multitask language understanding in chinese. arXiv preprint arXiv:2306.09212, 2023. 16" + ], + "bbox": [ + 145, + 90, + 861, + 911 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[64] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023. 
15", + "[65] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024. 14, 15", + "[66] Yanghao Li, Chao-Yuan Wu, Haoqi Fan, Karttikeya Mangalam, Bo Xiong, Jitendra Malik, and Christoph Feichtenhofer. Mviv2: Improved multiscale vision transformers for classification and detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4804-4814, 2022. 1, 3", + "[67] Yifan Li, Yifan Du, Kun Zhou, Jinping Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. In The Conference on Empirical Methods in Natural Language Processing, pages 292–305, 2023. 12, 13", + "[68] Zhang Li, Biao Yang, Qiang Liu, Zhiyin Ma, Shuo Zhang, Jingxu Yang, Yabo Sun, Yuliang Liu, and Xiang Bai. Monkey: Image resolution and text label are important things for large multi-modal models. arXiv preprint arXiv:2311.06607, 2023. 1", + "[69] Zhiqi Li, Guo Chen, Shilong Liu, Shihao Wang, Vibashan VS, Yishen Ji, Shiyi Lan, Hao Zhang, Yilin Zhao, Subhashree Radhakrishnan, et al. Eagle 2: Building post-training data strategies from scratch for frontier vision-language models. arXiv preprint arXiv:2501.14818, 2025. 1", + "[70] Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, 2023. 7", + "[71] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pre-training for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26689-26699, 2024. 1, 9, 10, 15, 16", + "[72] Adam Dahlgren Lindström and Savitha Sam Abraham. 
Clevr-math: A dataset for compositional language, visual and mathematical reasoning. arXiv preprint arXiv:2208.05358, 2022. 6", + "[73] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in Neural Information Processing Systems, 36, 2023. 2", + "[74] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Qing Jiang, Chunyuan Li, Jianwei Yang, Hang Su, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. In European Conference on Computer Vision, pages 38-55. Springer, 2025. 13", + "[75] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? arXiv preprint arXiv:2307.06281, 2023. 12", + "[76] Yuliang Liu, Zhang Li, Hongliang Li, Wenwen Yu, Mingxin Huang, Dezhi Peng, Mingyu Liu, Mingrui Chen, Chunyuan Li, Lianwen Jin, et al. On the hidden mystery ofOCR in large multimodal models. arXiv preprint arXiv:2305.07895, 2023. 8, 10", + "[77] Zihan Liu, Yang Chen, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. Acemath: Advancing frontier math reasoning with post-training and reward modeling. arXiv preprint, 2024. 5", + "[78] Zuyan Liu, Yuhao Dong, Ziwei Liu, Winston Hu, Jiwen Lu, and Yongming Rao. Oryx mllm: On-demand spatial-temporal understanding at arbitrary resolution. arXiv preprint arXiv:2409.12961, 2024. 15", + "[79] Dakuan Lu, Xiaoyu Tan, Rui Xu, Tianchu Yao, Chao Qu, Wei Chu, Yinghui Xu, and Yuan Qi. Scp-116k: A high-quality problem-solution dataset and a generalized pipeline for automated extraction in the higher education science domain, 2025. 5", + "[80] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. 
7, 8, 9", + "[81] Pan Lu, Ran Gong, Shibiao Jiang, Liang Qiu, Siyuan Huang, Xiaodan Liang, and Song-Chun Zhu. Inter-gps: Interpretable geometry problem solving with formal language and symbolic reasoning. arXiv preprint arXiv:2105.04165, 2021. 6", + "[82] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. Advances in Neural Information Processing Systems, 35:2507-2521, 2022. 6" + ], + "bbox": [ + 145, + 90, + 864, + 912 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[83] Pan Lu, Liang Qiu, Jiaqi Chen, Tony Xia, Yizhou Zhao, Wei Zhang, Zhou Yu, Xiaodan Liang, and Song-Chun Zhu. Iconqa: A new benchmark for abstract diagram understanding and visual language reasoning. arXiv preprint arXiv:2110.13214, 2021.6", + "[84] Shiyin Lu, Yang Li, Qing-Guo Chen, Zhao Xu, Weihua Luo, Kaifu Zhang, and Han-Jia Ye. Ovis: Structural embedding alignment for multimodal large language model. arXiv preprint arXiv:2405.20797, 2024. 9, 10", + "[85] Xudong Lu, Yinghao Chen, Cheng Chen, Hui Tan, Boheng Chen, Yina Xie, Rui Hu, Guanxin Tan, Renshou Wu, Yan Hu, et al. Bluelm-v-3b: Algorithm and system co-design for multimodal large language models on mobile devices. arXiv preprint arXiv:2411.10640, 2024. 1", + "[86] Yujie Lu, Dongfu Jiang, Wenhu Chen, William Yang Wang, Yejin Choi, and Bill Yuchen Lin. Wildvision: Evaluating vision-language models in the wild with human preferences. arXiv preprint arXiv:2406.11069, 2024. 11", + "[87] Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. Improve mathematical reasoning in language models by automated process supervision. 
arXiv preprint arXiv:2406.06592, 2, 2024. 7", + "[88] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11–20, 2016. 13", + "[89] Andrés Marafioti, Orr Zohar, Miquel Farré, Merve Noyan, Elie Bakouch, Pedro Cuenca, Cyril Zakka, Loubna Ben Allal, Anton Lozhkov, Nouamane Tazi, et al. Smolvlm: Redefining small and efficient multimodal models. arXiv preprint arXiv:2504.05299, 2025. 1", + "[90] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3195-3204, 2019. 6", + "[91] Ahmed Masry, Xuan Long Do, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. In Proceedings of the Annual Meeting of the Association for Computational Linguistics, pages 2263-2279, 2022. 6, 7, 8, 10", + "[92] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and CV Jawahar. Infographicvqa. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1697-1706, 2022. 6, 7, 8, 10", + "[93] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2200–2209, 2021. 7, 8, 10", + "[94] Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. arXiv preprint arXiv:2407.00215, 2024. 7", + "[95] Fanqing Meng, Jin Wang, Chuanhao Li, Quanfeng Lu, Hao Tian, Jiaqi Liao, Xizhou Zhu, Jifeng Dai, Yu Qiao, Ping Luo, et al. 
Mmiu: Multimodal multi-image understanding for evaluating large vision-language models. arXiv preprint arXiv:2408.02718, 2024. 9, 11", + "[96] Anand Mishra, Shashank Shekhar, Ajeet Kumar Singh, and Anirban Chakraborty. Ocr-vqa: Visual question answering by reading text in images. In International Conference on Document Analysis and Recognition, pages 947-952, 2019. 6", + "[97] OpenAI. Gpt-4v(ison) system card. https://cdn.openai.com/papers/GPTV_System/Card.pdf, 2023.1,8,9,10,11,12,14,15,16", + "[98] OpenAI. Gpt-4o system card. https://openai.com/index/gpt-4o-system-card/, 2025.2,8", + "[99] Runqi Qiao, Qiuna Tan, Guanting Dong, Minhui Wu, Chong Sun, Xiaoshuai Song, Zhuoma GongQue, Shanglin Lei, Zhe Wei, Miaoxuan Zhang, et al. We-math: Does your large multimodal model achieve human-like mathematical reasoning? arXiv preprint arXiv:2407.01284, 2024. 8, 9", + "[100] Yujia Qin, Yining Ye, Junjie Fang, Haoming Wang, Shihao Liang, Shizuo Tian, Junda Zhang, Jiahao Li, Yunxin Li, Shijue Huang, et al. Ui-tars: Pioneering automated gui interaction with native agents. arXiv preprint arXiv:2501.12326, 2025. 16", + "[101] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36, 2024. 6" + ], + "bbox": [ + 138, + 90, + 861, + 910 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[102] Machel Reid, Nikolay Savinov, Denis Teplyashin, Dmitry Lepikhin, Timothy Lillicrap, Jean-baptiste Alayrac, Radu Soricut, Angeliki Lazaridou, Orhan Firat, Julian Schrittwieser, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024. 
10, 11, 12, 15, 16", + "[103] Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. Winogrande: An adversarial winograd schema challenge at scale. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 8732-8740, 2020. 16", + "[104] Minjoon Seo, Hannaneh Hajishirzi, Ali Farhadi, Oren Etzioni, and Clint Malcolm. Solving geometry problems: Combining text and diagram interpretation. In Proceedings of the 2015 conference on empirical methods in natural language processing, pages 1466-1476, 2015. 6", + "[105] Min Shi, Fuxiao Liu, Shihao Wang, Shijia Liao, Subhashree Radhakrishnan, De-An Huang, Hongxu Yin, Karan Sapra, Yaser Yacoob, Humphrey Shi, et al. Eagle: Exploring the design space for multimodal llms with mixture of encoders. arXiv preprint arXiv:2408.15998, 2024. 1", + "[106] Wenhao Shi, Zhiqiang Hu, Yi Bin, Junhua Liu, Yang Yang, See-Kiong Ng, Lidong Bing, and Roy Ka-Wei Lee. Math-llava: Bootstrapping mathematical reasoning for multimodal large language models. arXiv preprint arXiv:2406.17294, 2024. 6", + "[107] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8317-8326, 2019. 6, 8, 10", + "[108] Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024. 7", + "[109] Hai-Long Sun, Da-Wei Zhou, Yang Li, Shiyin Lu, Chao Yi, Qing-Guo Chen, Zhao Xu, Weihua Luo, Kaifu Zhang, De-Chuan Zhan, et al. Parrot: Multilingual visual instruction tuning. arXiv preprint arXiv:2406.02539, 2024. 14", + "[110] Kai Sun, Dian Yu, Dong Yu, and Claire Cardie. Investigating prior knowledge for challenging Chinese machine reading comprehension. Transactions of the Association for Computational Linguistics, 8:141-155, 2020. 
16", + "[111] Zhiqing Sun, Sheng Shen, Shengcao Cao, Haotian Liu, Chunyuan Li, Yikang Shen, Chuang Gan, Liang-Yan Gui, Yu-Xiong Wang, Yiming Yang, et al. Aligning large multimodal models with factually augmented rlhf. arXiv preprint arXiv:2309.14525, 2023. 12, 13", + "[112] Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261, 2022. 16", + "[113] Jingqun Tang, Qi Liu, Yongjie Ye, Jinghui Lu, Shu Wei, Chunhui Lin, Wanqing Li, Mohamad Fitri Faiz Bin Mahmood, Hao Feng, Zhen Zhao, et al. Mtvqa: Benchmarking multilingual text-centric visual question answering. arXiv preprint arXiv:2405.11985, 2024. 14", + "[114] Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1, 14", + "[115] Qwen Team. Qvq: To see the world with wisdom, December 2024. 9", + "[116] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024. 9, 10, 11, 12", + "[117] v DeepMind. Gemini 2.5 pro. https://deepmind.google/technologies/gemini/pro/, 2025. 1, 2, 8", + "[118] Fei Wang, Xingyu Fu, James Y Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou, Kai Zhang, et al. Muirbench: A comprehensive benchmark for robust multi-image understanding. arXiv preprint arXiv:2406.09411, 2024. 9, 11", + "[119] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. arXiv preprint arXiv:2402.14804, 2024. 
8, 9", + "[120] Peiyi Wang, Lei Li, Zhihong Shao, RX Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce lms step-by-step without human annotations. arXiv preprint arXiv:2312.08935, 2023. 7", + "[121] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 1, 8, 10, 11, 12, 13, 14, 15" + ], + "bbox": [ + 135, + 90, + 861, + 911 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[122] Peng Wang, Shijie Wang, Junyang Lin, Shuai Bai, Xiaohuan Zhou, Jingren Zhou, Xinggang Wang, and Chang Zhou. One-peace: Exploring one general representation model toward unlimited modalities. arXiv:2305.11172, 2023. 13", + "[123] Weihan Wang, Qingsong Lv, Wenmeng Yu, Wenyi Hong, Ji Qi, Yan Wang, Junhui Ji, Zhuoyi Yang, Lei Zhao, Xixuan Song, et al. Cogvlm: Visual expert for pretrained language models. arXiv preprint arXiv:2311.03079, 2023. 1, 13", + "[124] Weiyun Wang, Zhe Chen, Wenhai Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Jinguo Zhu, Xizhou Zhu, Lewei Lu, Yu Qiao, and Jifeng Dai. Enhancing the reasoning ability of multimodal large language models via mixed preference optimization. arXiv preprint arXiv:2411.10442, 2024. 2, 6, 7", + "[125] Weiyun Wang, Zhangwei Gao, Lianjie Chen, Zhe Chen, Jinguo Zhu, Xiangyu Zhao, Yangzhou Liu, Yue Cao, Shenglong Ye, Xizhou Zhu, et al. Visualprm: An effective process reward model for multimodal reasoning. arXiv preprint arXiv:2503.10291, 2025. 2, 7, 8, 9", + "[126] Weiyun Wang, Yiming Ren, Haowen Luo, Tiantong Li, Chenxiang Yan, Zhe Chen, Wenhai Wang, Qingyun Li, Lewei Lu, Xizhou Zhu, et al. 
The all-seeing project v2: Towards general relation comprehension of the open world. arXiv preprint arXiv:2402.19474, 2024. 6, 12, 13", + "[127] Weiyun Wang, Min Shi, Qingyun Li, Wenhai Wang, Zhenhang Huang, Linjie Xing, Zhe Chen, Hao Li, Xizhou Zhu, Zhiguo Cao, et al. The all-seeing project: Towards panoptic visual recognition and understanding of the open world. In The International Conference on Learning Representations, 2024. 6", + "[128] Zirui Wang, Mengzhou Xia, Luxi He, Howard Chen, Yitao Liu, Richard Zhu, Kaiqu Liang, Xindi Wu, Haotian Liu, Sadhika Malladi, et al. Charxiv: Charting gaps in realistic chart understanding in multimodal llms. arXiv preprint arXiv:2406.18521, 2024. 8, 10", + "[129] Haoning Wu, Dongxu Li, Bei Chen, and Junnan Li. Longvideobench: A benchmark for long-context interleaved video-language understanding. arXiv preprint arXiv:2407.15754, 2024. 8, 14, 15", + "[130] Zhiyong Wu, Zhenyu Wu, Fangzhi Xu, Yian Wang, Qiushi Sun, Chengyou Jia, Kanzhi Cheng, Zichen Ding, Liheng Chen, Paul Pu Liang, et al. Os-atlas: A foundation action model for generalist gui agents. arXiv preprint arXiv:2410.23218, 2024. 16", + "[131] Yijia Xiao, Edward Sun, Tianyu Liu, and Wei Wang. Logicvista: Multimodal llm logical reasoning benchmark in visual contexts. arXiv preprint arXiv:2407.04973, 2024. 8, 9", + "[132] Yiheng Xu, Zekun Wang, Junli Wang, Dunjie Lu, Tianbao Xie, Amrita Saha, Doyen Sahoo, Tao Yu, and Caiming Xiong. Aguvis: Unified pure vision agents for autonomous gui interaction. 2024. 16", + "[133] B. Yan, Yi Jiang, Jiannan Wu, D. Wang, Ping Luo, Zehuan Yuan, and Hutchuan Lu. Universal instance perception as object discovery and retrieval. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 13", + "[134] Jihan Yang, Shusheng Yang, Anjali Gupta, Rilyn Han, Li Fei-Fei, and Saining Xie. Thinking in Space: How Multimodal Large Language Models See, Remember and Recall Spaces. arXiv preprint arXiv:2412.14171, 2024. 
16", + "[135] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 9, 10, 11, 12, 15", + "[136] Qinghao Ye, Haiyang Xu, Jiabo Ye, Ming Yan, Haowei Liu, Qi Qian, Ji Zhang, Fei Huang, and Jingren Zhou. mplug-owl2: Revolutionizing multi-modal large language model with modality collaboration. arXiv preprint arXiv:2311.04257, 2023. 1, 14", + "[137] Kaining Ying, Fanqing Meng, Jin Wang, Zhiqian Li, Han Lin, Yue Yang, Hao Zhang, Wenbo Zhang, Yuqi Lin, Shuo Liu, Jiayi Lei, Quanfeng Lu, Runjian Chen, Peng Xu, Renrui Zhang, Haozhe Zhang, Peng Gao, Yali Wang, Yu Qiao, Ping Luo, Kaipeng Zhang, and Wenqi Shao. Mmt-bench: A comprehensive multimodal benchmark for evaluating large vision-language models towards multitask agi. arXiv preprint arXiv:2404.16006, 2024. 9, 11", + "[138] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. 12", + "[139] Weihao Yu, Zhengyuan Yang, Linfeng Ren, Linjie Li, Jianfeng Wang, Kevin Lin, Chung-Ching Lin, Zicheng Liu, Lijuan Wang, and Xinchao Wang. Mm-vet2: A challenging benchmark to evaluate large multimodal models for integrated capabilities. arXiv preprint arXiv:2408.00765, 2024. 12", + "[140] Ya-Qi Yu, Minghui Liao, Jiwen Zhang, and Jihao Wu. Texthawk2: A large vision-language model excels in bilingualOCR and grounding with 16x fewer tokens. arXiv preprint arXiv:2410.05261, 2024. 
13" + ], + "bbox": [ + 135, + 90, + 861, + 912 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[141] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. arXiv preprint arXiv:2311.16502, 2023. 2, 7, 8, 9", + "[142] Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. Hellaswag: Can a machine really finish your sentence? In Proceedings of the Annual Meeting of the Association for Computational Linguistics, pages 4791-4800, 2019. 16", + "[143] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Dufter, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1.5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 13", + "[144] Haotian Zhang, Haoxuan You, Philipp Dufter, Bowen Zhang, Chen Chen, Hong-You Chen, Tsu-Jui Fu, William Yang Wang, Shih-Fu Chang, Zhe Gan, et al. Ferret-v2: An improved baseline for referring and grounding with large language models. arXiv preprint arXiv:2404.07973, 2024. 13", + "[145] Peiyuan Zhang, Kaichen Zhang, Bo Li, Guangtao Zeng, Jingkang Yang, Yuanhan Zhang, Ziyue Wang, Haoran Tan, Chunyuan Li, and Ziwei Liu. Long context transfer from language to vision. arXiv preprint arXiv:2406.16852, 2024. 16", + "[146] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Peng Gao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? arXiv preprint arXiv:2403.14624, 2024.8, 9", + "[147] Renrui Zhang, Xinyu Wei, Dongzhi Jiang, Yichi Zhang, Ziyu Guo, Chengzhuo Tong, Jiaming Liu, Aojun Zhou, Bin Wei, Shanghang Zhang, et al. 
Mavis: Mathematical visual instruction tuning. arXiv preprint arXiv:2407.08739, 2024.6", + "[148] Tianyu Zhang, Suyuchen Wang, Lu Li, Ge Zhang, Perouz Taslakian, Sai Rajeswar, Jie Fu, Bang Liu, and Yoshua Bengio. Vcr: Visual caption restoration. arXiv preprint arXiv:2406.06462, 2024. 8, 10", + "[149] Xiaotian Zhang, Chunyang Li, Yi Zong, Zhengyu Ying, Liang He, and Xipeng Qiu. Evaluating the performance of large language models on gaokao benchmark. arXiv preprint arXiv:2305.12474, 2023. 16", + "[150] Y Zhang, B Li, H Liu, Y Lee, L Gui, D Fu, J Feng, Z Liu, and C Li. Llava next: A strong zero-shot video understanding model. 2024. 16", + "[151] Yi-Fan Zhang, Huanyu Zhang, Haochen Tian, Chaoyou Fu, Shuangqing Zhang, Junfei Wu, Feng Li, Kun Wang, Qingsong Wen, Zhang Zhang, et al. Mme-realworld: Could your multimodal llm challenge high-resolution real-world scenarios that are difficult for humans? arXiv preprint arXiv:2408.13257, 2024. 11", + "[152] Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning. arXiv preprint arXiv:2501.07301, 2025. 7", + "[153] Bingchen Zhao, Yongshuo Zong, Letian Zhang, and Timothy Hospedales. Benchmarking multi-image understanding in vision and language models: Perception, knowledge, reasoning, and multi-hop reasoning. arXiv preprint arXiv:2406.12742, 2024. 9, 11", + "[154] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 14, 15", + "[155] Chengke Zou, Xingang Guo, Rui Yang, Junyu Zhang, Bin Hu, and Huan Zhang. Dynamath: A dynamic visual benchmark for evaluating mathematical reasoning robustness of vision language models. arXiv preprint arXiv:2411.00836, 2024. 
8, 9" + ], + "bbox": [ + 135, + 90, + 861, + 762 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 26 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10479/71273ce6-5170-4939-8354-af535b974810_model.json b/data/2025/2504_10xxx/2504.10479/71273ce6-5170-4939-8354-af535b974810_model.json new file mode 100644 index 0000000000000000000000000000000000000000..ef320d42d486cbacfb4d4d38d7844f77e0712393 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/71273ce6-5170-4939-8354-af535b974810_model.json @@ -0,0 +1,4005 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.263, + 0.061, + 0.709 + ], + "angle": 270, + "content": "arXiv:2504.10479v3 [cs.CV] 19 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.155, + 0.123, + 0.844, + 0.174 + ], + "angle": 0, + "content": "InternVL3: Exploring Advanced Training and Test-Time Recipes for Open-Source Multimodal Models" + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.226, + 0.835, + 0.325 + ], + "angle": 0, + "content": "Jinguo Zhu\\(^{1*}\\), Weiyun Wang\\(^{5,1*†}\\), Zhe Chen\\(^{4,1*†}\\), Zhaoyang Liu\\(^{1*†}\\), Shenglong Ye\\(^{1*}\\), Lixin Gu\\(^{1*}\\), Hao Tian\\(^{2*}\\), Yuchen Duan\\(^{6,1*†}\\), Weijie Su\\(^{1}\\), Jie Shao\\(^{4,1†}\\), Zhangwei Gao\\(^{7,1†}\\), Erfei Cui\\(^{7,1†}\\), Xuehui Wang\\(^{7,1†}\\), Yue Cao\\(^{4,1†}\\), Yangzhou Liu\\(^{4,1†}\\), Xingguang Wei\\(^{1†}\\), Hongjie Zhang\\(^{1}\\), Haomin Wang\\(^{7,1†}\\), Weiye Xu\\(^{1†}\\), Hao Li\\(^{1†}\\), Jiahao Wang\\(^{1†}\\), Nianchen Deng\\(^{1}\\), Songze Li\\(^{1}\\), Yinan He\\(^{1}\\), Tan Jiang\\(^{2}\\), Jiapeng Luo\\(^{2}\\), Yi Wang\\(^{1}\\), Conghui He\\(^{1}\\), Botian Shi\\(^{1}\\), Xingcheng Zhang\\(^{1}\\), Wenqi Shao\\(^{1}\\), Junjun He\\(^{1}\\), Yingtong Xiong\\(^{1}\\), Wenwen Qu\\(^{1}\\), Peng Sun\\(^{1}\\), Penglong Jiao\\(^{1}\\), Han Lv\\(^{1}\\), Lijun Wu\\(^{1}\\), Kaipeng Zhang\\(^{1}\\), Huipeng 
Deng\\(^{1}\\), Jiaye Ge\\(^{1}\\), Kai Chen\\(^{1}\\), Limin Wang\\(^{4,1}\\), Min Dou\\(^{1}\\), Lewei Lu\\(^{2}\\), Xizhou Zhu\\(^{3,1}\\), Tong Lu\\(^{4}\\), Dahua Lin\\(^{6,1}\\), Yu Qiao\\(^{1}\\), Jifeng Dai\\(^{3,1‡}\\), Wenhai Wang\\(^{6,1‡}\\)" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.327, + 0.796, + 0.358 + ], + "angle": 0, + "content": "\\(^{1}\\)Shanghai AI Laboratory \\(^{2}\\)SenseTime Research \\(^{3}\\)Tsinghua University \\(^{4}\\)Nanjing University \\(^{5}\\)Fudan University \\(^{6}\\)The Chinese University of Hong Kong \\(^{7}\\)Shanghai Jiao Tong University" + }, + { + "type": "text", + "bbox": [ + 0.315, + 0.371, + 0.684, + 0.384 + ], + "angle": 0, + "content": "Code: https://github.com/OpenGVLab/InternVL" + }, + { + "type": "text", + "bbox": [ + 0.272, + 0.385, + 0.726, + 0.397 + ], + "angle": 0, + "content": "Model: https://huggingface.co/OpenGVLab/InternVL3-78B" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.399, + 0.76, + 0.412 + ], + "angle": 0, + "content": "Data: https://huggingface.co/datasets/OpenGVLab/InternVL-Data" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.468, + 0.538, + 0.484 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.5, + 0.804, + 0.75 + ], + "angle": 0, + "content": "We introduce InternVL3, a significant advancement in the InternVL series featuring a native multimodal pre-training paradigm. Rather than adapting a text-only large language model (LLM) into a multimodal large language model (MLLM) that supports visual inputs, InternVL3 jointly acquires multimodal and linguistic capabilities from both diverse multimodal data and pure-text corpora during a single pre-training stage. This unified training paradigm effectively addresses the complexities and alignment challenges commonly encountered in conventional post-hoc training pipelines for MLLMs. 
To further improve performance and scalability, InternVL3 incorporates variable visual position encoding (V2PE) to support extended multimodal contexts, employs advanced post-training techniques such as supervised fine-tuning (SFT) and mixed preference optimization (MPO), and adopts test-time scaling strategies alongside an optimized training infrastructure. Extensive empirical evaluations demonstrate that InternVL3 delivers superior performance across a wide range of multi-modal tasks. In particular, InternVL3-78B achieves a score of 72.2 on the MMMU benchmark, setting a new state-of-the-art among open-source MLLMs. Its capabilities remain highly competitive with leading proprietary models, including ChatGPT-4o, Claude 3.5 Sonnet, and Gemini 2.5 Pro, while also maintaining strong pure-language proficiency. In pursuit of open-science principles, we will publicly release both the training data and model weights to foster further research and development in next-generation MLLMs." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.775, + 0.277, + 0.79 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.805, + 0.863, + 0.877 + ], + "angle": 0, + "content": "Multimodal large language models (MLLMs) [32, 66, 121, 21, 19, 123, 68, 114, 97, 136, 71, 31, 85, 117, 18, 89, 105, 69] have recently achieved or even surpassed human-level performance in a broad spectrum of tasks, underscoring their potential as a significant stride toward artificial general intelligence (AGI). Yet, the majority of leading MLLMs—both open-source and proprietary—are adapted from text-only large language models through sophisticated multi-stage pipelines [21, 19, 18, 5, 121, 7]. 
These “post-hoc” approaches are built upon the" + }, + { + "type": "page_footnote", + "bbox": [ + 0.136, + 0.884, + 0.627, + 0.912 + ], + "angle": 0, + "content": "* equal contribution; † interns at OpenGVLab, Shanghai AI Laboratory; corresponding authors (daijifeng@tsinghua.edu.cn, wangwenhai@pjlab.org.cn)." + } + ], + [ + { + "type": "table", + "bbox": [ + 0.142, + 0.091, + 0.859, + 0.428 + ], + "angle": 0, + "content": "
InternVL2.5 78BInternVL3 8BInternVL3 78BQwen2.5-VL 72BOther Open-Source MLLMsClaude-3.5 SonnetChatGPT-4o-latestGemini-2.5 Pro
Model WeightsXXX
Training DataXX-XXX
MMMU Multi-discipline70.1%65.6%72.2% (2.1 ↑)70.2%64.5%66.4%72.9%74.7%
MathVista Math72.3%75.2%79.6% (7.3 ↑)74.8%70.5%65.1%71.6%80.9%
AI2D Diagrams89.1%85.2%89.7% (0.6 ↑)88.7%88.1%81.2%86.3%89.5%
ChartQA Charts88.3%86.6%89.7% (1.4 ↑)89.5%88.3%90.8%--
DocVQA Documents95.1%92.7%95.4% (0.3 ↑)96.4%96.5%95.2%--
InfographicVQA infographics84.1%76.8%85.2% (1.1 ↑)87.3%84.7%74.3%--
HallusionBench Hallucination57.4%49.9%59.1% (1.7 ↑)55.2%58.1%55.5%57.0%64.1%
OCRBench OCR854880906 (52↑)885877-894862
LongVideoBench Video63.6%58.8%65.7%(2.1↑)60.7%61.3%---
" + }, + { + "type": "table_caption", + "bbox": [ + 0.136, + 0.435, + 0.865, + 0.493 + ], + "angle": 0, + "content": "Figure 1: Multimodal performance of the InternVL series and other advanced MLLMs. The InternVL series has consistently exhibited progressive enhancements in multimodal capabilities. The newly released InternVL3 significantly outperforms existing open-source MLLMs. Moreover, even in comparison with state-of-the-art closed-source commercial models, InternVL3 continues to demonstrate highly competitive performance." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.532, + 0.862, + 0.603 + ], + "angle": 0, + "content": "original text-based pre-training processes, thereby introducing alignment challenges when integrating additional modalities such as vision. In practice, bridging modality gaps often necessitates incorporating auxiliary data from specialized domains (e.g., optical character recognition scenarios) and intricate parameter-freezing or multi-stage fine-tuning schedules to ensure that core linguistic capacities remain uncompromised [73, 7, 5, 18]. Such resource-intensive strategies highlight the need for more efficient multimodal training paradigms." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.608, + 0.861, + 0.693 + ], + "angle": 0, + "content": "In this report, we introduce InternVL3, the latest milestone in the InternVL series [21, 20, 18], which is distinguished by its native multimodal pre-training strategy. Rather than first pre-training a text-only large language model and subsequently retrofitting it via multimodal alignment to support visual processing, InternVL3 learns multimodal capabilities from the pre-training stage by jointly exposed to both text-only corpora and diverse multimodal datasets. This unified approach enables the model to simultaneously acquire linguistic and multimodal competencies in a more efficient and integrated manner." 
+ }, + { + "type": "text", + "bbox": [ + 0.135, + 0.698, + 0.862, + 0.769 + ], + "angle": 0, + "content": "InternVL3 further excels through multiple innovations that reinforce both performance and scalability. We employ a variable visual position encoding (V2PE) mechanism [42] to accommodate longer multimodal contexts. Furthermore, advanced post-training strategies—comprising supervised fine-tuning (SFT) and mixed preference optimization (MPO) [124]—together with test-time scaling strategies [125] and an optimized training infrastructure [15], significantly enhance InternVL3's efficiency and performance." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.773, + 0.865, + 0.913 + ], + "angle": 0, + "content": "Comprehensive empirical evaluations demonstrate that InternVL3 surpasses its predecessors (e.g., InternVL2.5 [18]) across a wide range of tasks, including multi-discipline reasoning, document understanding, multi-image / video understanding, real-world comprehension, multimodal hallucination detection, visual grounding, and multilingual capabilities. Notably, by incorporating expanded domain-specific datasets, InternVL3 also exhibits marked improvements in tool usage, GUI agents, industrial image analysis, and spatial reasoning, thus substantially extending the multimodal scenarios addressed by the InternVL series. It proves highly competitive with other open-source MLLMs such as Qwen2.5-VL [7] and remains on par with closed-source models (e.g., ChatGPT-4o [98], Claude-3.5 Sonnet [3], Gemini-2.5 Pro [117]). This versatility is evidenced by its 72.2-point performance on the MMMU benchmark [141], setting a new standard among open-source MLLMs. Additionally, InternVL3 demonstrates language capabilities comparable to other advanced LLMs of similar scale." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.142, + 0.09, + 0.852, + 0.415 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.135, + 0.422, + 0.865, + 0.48 + ], + "angle": 0, + "content": "Figure 2: Performance of various MLLMs on the OpenCompass multimodal academic leaderboard. The enhanced InternVL series—InternVL3—demonstrates outstanding multimodal capabilities, significantly outperforming both the Qwen2.5-VL series and closed-source models such as Step-1o, GLM-4v-Plus, and GPT-4o. Remarkably, InternVL3-78B also remains highly competitive with the state-of-the-art Gemini-2.5-Pro." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.504, + 0.861, + 0.548 + ], + "angle": 0, + "content": "To foster further advancements within the open-source community, we will release the training data1 and model weights alongside this work, thereby ensuring transparency and reproducibility for the continued development of next-generation MLLMs." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.566, + 0.264, + 0.584 + ], + "angle": 0, + "content": "2 InternVL3" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.597, + 0.861, + 0.655 + ], + "angle": 0, + "content": "Building upon the prior InternVL series [21, 19, 18], we propose InternVL3, a new generation within the InternVL model family. InternVL3 is specifically designed to streamline the training pipeline while significantly enhancing multimodal capabilities. In this section, we first delineate the core components of InternVL3, including its model architecture, training procedures, test-time scaling strategies, and infrastructure-level optimizations." 
+ }, + { + "type": "title", + "bbox": [ + 0.137, + 0.669, + 0.317, + 0.685 + ], + "angle": 0, + "content": "2.1 Model Architecture" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.695, + 0.861, + 0.723 + ], + "angle": 0, + "content": "The architecture of InternVL3 follows the same general framework as its predecessors, adhering to the \"ViTMLP-LLM\" paradigm [66, 18, 41, 20]. Detailed architectural specifications are summarized in Table 1." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.729, + 0.861, + 0.856 + ], + "angle": 0, + "content": "Although the native pre-training paradigm discussed later could enable training MLLMs from scratch, we choose to initialize the ViT and LLM components with pre-trained model weights to reduce computational costs. The vision encoder is available in two configurations: InternViT-300M and InternViT-6B. For the language model, we leverage pre-trained large language models (LLMs), specifically the Qwen2.5 series and InternLM3-8B. Importantly, our LLM components are initialized solely from pre-trained base models, without employing instruction-tuned variants. The multilayer perceptron (MLP) utilized in the model is a two-layer network with random initialization. In line with the approach taken in InternVL2.5, InternVL3 incorporates a pixel unshuffle operation to enhance scalability for processing high-resolution images. This operation reduces the visual token count to one-quarter of its original value, representing each \\(448 \\times 448\\) image tile with 256 visual tokens." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.86, + 0.861, + 0.89 + ], + "angle": 0, + "content": "Variable Visual Position Encoding. InternVL3 also integrates the Variable Visual Position Encoding (V2PE) [42], which utilizes smaller, more flexible position increments for visual tokens. 
This modifica" + }, + { + "type": "page_footnote", + "bbox": [ + 0.158, + 0.898, + 0.859, + 0.913 + ], + "angle": 0, + "content": "1The open-source data are being organized, and a comprehensive list will be included in a future revision of this report." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.188, + 0.089, + 0.805, + 0.2 + ], + "angle": 0, + "content": "
Model Name#ParamVision EncoderLanguage ModelOpenCompass Academic
InternVL3-1B0.9BInternViT-300M-448px-V2.5Qwen2.5-0.5B57.4
InternVL3-2B1.9BInternViT-300M-448px-V2.5Qwen2.5-1.5B63.9
InternVL3-8B8.1BInternViT-300M-448px-V2.5Qwen2.5-7B73.3
InternVL3-9B9.2BInternViT-300M-448px-V2.5InternLM3-8B72.4
InternVL3-14B15.1BInternViT-300M-448px-V2.5Qwen2.5-14B75.5
InternVL3-38B38.4BInternViT-6B-448px-V2.5Qwen2.5-32B77.3
InternVL3-78B78.4BInternViT-6B-448px-V2.5Qwen2.5-72B79.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.136, + 0.209, + 0.862, + 0.239 + ], + "angle": 0, + "content": "Table 1: Pre-trained models used in the InternVL3 series. The OpenCompass scores for the InternVL3 series were obtained through our local testing." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.273, + 0.862, + 0.304 + ], + "angle": 0, + "content": "tion facilitates the handling of longer multimodal contexts without excessively extending the position window. Specifically, each training sample for the MLLM is represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.417, + 0.309, + 0.861, + 0.327 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} = \\left(x _ {1}, x _ {2}, \\dots , x _ {L}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.332, + 0.862, + 0.374 + ], + "angle": 0, + "content": "where each token \\( x_{i} \\) can be a textual token embedding, a visual embedding, or another modality-specific representation (e.g., video patch embeddings). The position index \\( p_{i} \\) for any token \\( x_{i} \\) can be computed sequentially as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.35, + 0.375, + 0.861, + 0.409 + ], + "angle": 0, + "content": "\\[\np _ {i} = \\left\\{ \\begin{array}{l l} 0, & \\text {i f} i = 1, \\\\ f _ {\\text {p o s}} \\left(p _ {i - 1}, x _ {i}\\right), & \\text {f o r} i = 2, 3, \\dots , N. \\end{array} \\right. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.412, + 0.862, + 0.455 + ], + "angle": 0, + "content": "In contrast to traditional MLLMs, where position indices increment uniformly by 1 for each token, irrespective of modality, V2PE employs a modality-specific recursive function for position index computation. 
This results in distinct position index assignments for textual and visual tokens:" + }, + { + "type": "equation", + "bbox": [ + 0.355, + 0.463, + 0.861, + 0.497 + ], + "angle": 0, + "content": "\\[\np _ {i} = p _ {i - 1} + \\left\\{ \\begin{array}{l l} 1, & \\text {i f} x _ {i} \\text {i s a t e x t u a l t o k e n ,} \\\\ \\delta , & \\text {i f} x _ {i} \\text {i s a v i s u a l t o k e n ,} \\end{array} \\right. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.504, + 0.862, + 0.572 + ], + "angle": 0, + "content": "where \\(\\delta\\) is a smaller increment (\\(\\delta < 1\\)), reducing the rate at which position indices increase for visual tokens. The standard increment of 1 is retained for textual tokens to preserve their positional distinctions. In line with the original V2PE design, we maintain that \\(\\delta\\) remains constant within a single image to preserve the relative positional relationships. During training, \\(\\delta\\) is randomly chosen for each image from a predefined set of fractional values:" + }, + { + "type": "equation", + "bbox": [ + 0.348, + 0.571, + 0.861, + 0.6 + ], + "angle": 0, + "content": "\\[\n\\delta \\in \\Delta = \\left\\{1, \\frac {1}{2}, \\frac {1}{4}, \\frac {1}{8}, \\frac {1}{1 6}, \\frac {1}{3 2}, \\frac {1}{6 4}, \\frac {1}{1 2 8}, \\frac {1}{2 5 6} \\right\\}. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.602, + 0.862, + 0.644 + ], + "angle": 0, + "content": "During inference, \\(\\delta\\) can be flexibly selected based on the input sequence length, enabling a balance between task performance and ensuring that position indices remain within the model's valid context range. Notably, when \\(\\delta = 1\\), V2PE reverts to the conventional positional encoding used in InternVL2.5." 
+ }, + { + "type": "title", + "bbox": [ + 0.137, + 0.66, + 0.406, + 0.675 + ], + "angle": 0, + "content": "2.2 Native Multimodal Pre-Training" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.685, + 0.862, + 0.812 + ], + "angle": 0, + "content": "We propose a native multimodal pre-training approach that consolidates language pre-training and multi-modal alignment training into a single pre-training stage. Unlike conventional paradigms—where a language-only large model is first trained (typically with language pre-training followed by language post-training) and subsequently adapted to accommodate additional modalities—our method performs integrated optimization by interleaving multimodal data (e.g., image-text, video-text, or interleaved image-text sequences) with large-scale textual corpora during the pre-training process. This unified training scheme enables the pre-trained model to learn both linguistic and multimodal capabilities simultaneously, ultimately enhancing its capability to handle vision-language tasks without introducing additional bridging modules or subsequent inter-model alignment procedures." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.822, + 0.862, + 0.867 + ], + "angle": 0, + "content": "Multimodal Autoregressive Formulation. Let \\(\\mathcal{M}\\) denote a Transformer-based model parameterized by \\(\\theta\\) that can process text, image, and video simultaneously. 
Specifically, for an arbitrary training sample \\(\\mathbf{x} = (x_{1}, x_{2}, \\ldots, x_{L})\\) with the token length of \\(L\\), we adopt the standard left-to-right autoregressive objective:" + }, + { + "type": "equation", + "bbox": [ + 0.329, + 0.875, + 0.861, + 0.915 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {f u l l}} (\\theta) = - \\sum_ {i = 2} ^ {L} w _ {i} \\cdot \\log p _ {\\theta} \\left(x _ {i} \\mid x _ {1}, \\dots , x _ {i - 1}\\right), \\tag {5}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.135, + 0.092, + 0.862, + 0.123 + ], + "angle": 0, + "content": "where \\( w_{i} \\) denotes the loss weight of token \\( i \\). Although this formulation naturally propagates gradients through tokens of all modalities, we restrict the loss computation exclusively to text tokens, resulting in:" + }, + { + "type": "equation", + "bbox": [ + 0.302, + 0.128, + 0.862, + 0.179 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {t e x t - o n l y}} (\\theta) = - \\sum_ {\\substack {i = 2 \\\\ x _ {i} \\in \\text {T e x t}}} ^ {L} w _ {i} \\cdot \\log p _ {\\theta} \\left(x _ {i} \\mid x _ {1}, \\dots , x _ {i - 1}\\right). \\tag{6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.185, + 0.865, + 0.27 + ], + "angle": 0, + "content": "Under this selective objective, visual tokens serve as conditioning context for text prediction and are not directly predicted. Consequently, the model learns to embed multimodal information in a manner that is beneficial for downstream language decoding tasks. Notably, regarding the design choice of the token weight \\( w_{i} \\), as discussed in InternVL2.5 [18], the widely used token averaging and sample averaging strategies can lead to gradients biased toward longer and shorter responses, respectively. 
To mitigate this issue, we adopt square averaging, which is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.374, + 0.285, + 0.862, + 0.339 + ], + "angle": 0, + "content": "\\[\nw _ {i} = \\left\\{ \\begin{array}{l l} \\frac {1}{l ^ {0}}, & \\text{for token averaging} \\\\ \\frac {1}{l ^ {0.5}}, & \\text{for square averaging} \\\\ \\frac {1}{l ^ {1}}, & \\text{for sample averaging}, \\end{array} \\right. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.348, + 0.804, + 0.364 + ], + "angle": 0, + "content": "where \\( l \\) denotes the number of tokens in the training sample on which the loss needs to be calculated." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.374, + 0.862, + 0.414 + ], + "angle": 0, + "content": "Joint Parameter Optimization. Unlike the conventional \"language-only training followed by multimodal adaptation\" paradigm, our method updates all model parameters jointly during multimodal pre-training. Specifically, let" + }, + { + "type": "equation", + "bbox": [ + 0.367, + 0.415, + 0.861, + 0.439 + ], + "angle": 0, + "content": "\\[\n\\theta^ {*} = \\underset {\\theta} {\\arg \\min } \\mathbb {E} _ {\\mathbf {x} \\in \\mathcal {D} _ {\\text{multi}}} \\left[ \\mathcal {L} _ {\\text{text-only}} (\\theta) \\right], \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.443, + 0.862, + 0.498 + ], + "angle": 0, + "content": "where \\(\\mathcal{D}_{\\mathrm{multi}}\\) is the union of large-scale text-only and multimodal corpora (e.g., image-text or video-text pairs). We thus optimize a single model to handle these combined data sources. This multi-task joint optimization ensures that text representations and visual features are learned in concert, reinforcing alignment across modalities." 
+ }, + { + "type": "text", + "bbox": [ + 0.135, + 0.505, + 0.862, + 0.59 + ], + "angle": 0, + "content": "Moreover, this integrated optimization departs from conventional \"language-only training followed by multimodal adaptation\" pipelines, which often freeze or partially fine-tune certain layers in the LLM component or even in the ViT encoder when adapting to MLLM. In contrast, our method trains every layer jointly, allowing all parameters to be jointly optimized on large-scale multimodal corpora and ensuring that both linguistic and visual features evolve synchronously. As a result, the final parameters are primed for high performance on both pure language and multimodal tasks, without additional tuning steps." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.594, + 0.862, + 0.735 + ], + "angle": 0, + "content": "Data. The pre-training data utilized in InternVL3 is broadly classified into two categories: multimodal data and pure language data. The multimodal dataset comprises a synthesis of pre-existing datasets alongside newly acquired real-world data. Specifically, we leverage the pre-training corpus from InternVL2.5, which covers a diverse range of domains such as image captioning, general question answering, mathematics, charts, optical character recognition (OCR), knowledge grounding, document understanding, multi-turn dialogue, and medical data. Although the overall data scale was not increased, the utility of this dataset was significantly improved by updating not only the MLP module weights but also those associated with the ViT and LLM components. In addition, to enhance the model's ability to generalize in real-world applications, additional data is incorporated from tasks related to graphical user interfaces (GUI), tool usage, 3D scene understanding, and video comprehension." 
+ }, + { + "type": "text", + "bbox": [ + 0.135, + 0.739, + 0.862, + 0.825 + ], + "angle": 0, + "content": "To compensate for the relatively short and less diverse textual content typically found in multimodal datasets, we integrate pure language data into the pre-training process. This helps preserve and amplify the model's capabilities in language understanding and generation. The language corpus is primarily constructed on the pre-training data from InternLM2.5 and is further augmented with various open-source text datasets [8, 77, 79]. This enhancement aims to improve the model's performance on knowledge-intensive tasks, as well as its proficiency in mathematical and reasoning tasks." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.828, + 0.862, + 0.913 + ], + "angle": 0, + "content": "Given the complexity of balancing these heterogeneous data sources, determining an appropriate sampling strategy is non-trivial. In InternVL3, we adopt a two-stage strategy to establish the optimal sampling ratio between multimodal and language data. Initially, we train separate models on the multimodal and language datasets and evaluate their performance on corresponding benchmarks, allowing us to identify optimal sampling ratios within each modality. Then, under a fixed total training budget, we combine the two modalities and determine their relative sampling ratio. Empirical studies show that a 1:3 ratio of language to multimodal data" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.135, + 0.092, + 0.865, + 0.135 + ], + "angle": 0, + "content": "yields the best overall performance across both unimodal and multimodal benchmarks. Under this configuration, the total number of training tokens is approximately 200 billion, comprising 50 billion from language data and 150 billion from multimodal data." 
+ }, + { + "type": "title", + "bbox": [ + 0.137, + 0.151, + 0.276, + 0.167 + ], + "angle": 0, + "content": "2.3 Post-Training" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.177, + 0.865, + 0.248 + ], + "angle": 0, + "content": "After the Native Multimodal Pre-Training, we apply a two-stage post-training strategy to further enhance the multimodal conversation and reasoning abilities of our models. This strategy consists of Supervised Fine-Tuning (SFT) and Mixed Preference Optimization (MPO). In the SFT phase, the model is trained to imitate the high-quality responses under positive supervision signals. In the subsequent MPO phase, we introduce additional supervision from both positive and negative samples, thereby further improving its overall abilities." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.252, + 0.865, + 0.337 + ], + "angle": 0, + "content": "Supervised Fine-Tuning. In this phase, the techniques of random JPEG compression, square loss re-weighting, and multimodal data packing proposed in InternVL2.5 [18] are also employed in the InternVL3 series. The main advancement of the SFT phase in InternVL3 compared to InternVL2.5 lies in the use of higher-quality and more diverse training data. Specifically, we further extend training samples for tool usage, 3D scene understanding, GUI operations, long context tasks, video understanding, scientific diagrams, creative writing, and multimodal reasoning." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.342, + 0.865, + 0.456 + ], + "angle": 0, + "content": "Mixed Preference Optimization. During Pre-training and SFT, the model is trained to predict the next token conditioned on previous ground-truth tokens. However, during inference, the model predicts each token based on its own prior outputs. This discrepancy between ground-truth tokens and model-predicted tokens introduces a distribution shift, which can impair the model's Chain-of-Thought (CoT) reasoning capabilities. 
To mitigate this issue, we employ Mixed Preference Optimization (MPO) [124], which introduces additional supervision from both positive and negative samples to align the model response distribution with the ground-truth distribution, thereby improving reasoning performance. Specifically, the training objective of MPO is a combination of preference loss \\(\\mathcal{L}_p\\), quality loss \\(\\mathcal{L}_q\\), and generation loss \\(\\mathcal{L}_g\\), which can be formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.4, + 0.462, + 0.863, + 0.479 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = w _ {p} \\mathcal {L} _ {p} + w _ {q} \\mathcal {L} _ {q} + w _ {g} \\mathcal {L} _ {g}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.485, + 0.863, + 0.514 + ], + "angle": 0, + "content": "where \\( w_{*} \\) represents the weight assigned to each loss component. Specifically, the DPO loss [101] serves as the preference loss to enable the model to learn the relative preference between chosen and rejected responses:" + }, + { + "type": "equation", + "bbox": [ + 0.326, + 0.52, + 0.863, + 0.551 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {p} = - \\log \\sigma \\left(\\beta \\log \\frac {\\pi_ {\\theta} \\left(y _ {c} \\mid x\\right)}{\\pi_ {0} \\left(y _ {c} \\mid x\\right)} - \\beta \\log \\frac {\\pi_ {\\theta} \\left(y _ {r} \\mid x\\right)}{\\pi_ {0} \\left(y _ {r} \\mid x\\right)}\\right), \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.556, + 0.863, + 0.6 + ], + "angle": 0, + "content": "where \\(\\beta\\) is the KL penalty coefficient, and \\(x\\), \\(y_{c}\\), and \\(y_{r}\\) are user query, chosen response, and rejected response, respectively. The policy model \\(\\pi_{\\theta}\\) is initialized from model \\(\\pi_0\\). 
After that, the BCO loss [53] is employed as the quality loss, which helps the model to understand the absolute quality of individual responses:" + }, + { + "type": "equation", + "bbox": [ + 0.441, + 0.606, + 0.863, + 0.625 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {q} = \\mathcal {L} _ {q} ^ {+} + \\mathcal {L} _ {q} ^ {-}, \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.632, + 0.863, + 0.674 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}_q^+\\) and \\(\\mathcal{L}_q^-\\) represent the loss for chosen and rejected responses, respectively. They are calculated independently, requiring the model to differentiate the absolute quality of individual responses. The loss terms are given by:" + }, + { + "type": "equation", + "bbox": [ + 0.373, + 0.673, + 0.863, + 0.705 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {q} ^ {+} = - \\log \\sigma \\left(\\beta \\log \\frac {\\pi_ {\\theta} \\left(y _ {c} \\mid x\\right)}{\\pi_ {0} \\left(y _ {c} \\mid x\\right)} - \\delta\\right), \\tag {12}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.354, + 0.707, + 0.863, + 0.737 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {q} ^ {-} = - \\log \\sigma \\left(- \\left(\\beta \\log \\frac {\\pi_ {\\theta} \\left(y _ {r} \\mid x\\right)}{\\pi_ {0} \\left(y _ {r} \\mid x\\right)} - \\delta\\right)\\right), \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.739, + 0.863, + 0.782 + ], + "angle": 0, + "content": "where \\(\\delta\\) represents the reward shift, calculated as the moving average of previous rewards to stabilize training. Finally, the LM loss is used as the generation loss to help the model learn the generation process of preferred responses. The loss function is defined in Equation 6." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.787, + 0.865, + 0.913 + ], + "angle": 0, + "content": "Data. 
For SFT data, we construct the training corpora based on those used in InternVL2.5 [18] while introducing additional tool usage, 3D scene understanding, GUI operations, scientific diagrams, creative writing, and multimodal reasoning samples. As a result, the number of training samples grows from 16.3M in InternVL2.5 to 21.7M in InternVL3. For MPO data, we construct preference pairs based on the data pipeline and samples proposed in MMPR v1.2 [124], which cover a wide range of domains, including general visual question answering (VQA) [43, 50, 90, 83, 127, 126], science [57, 16, 82], chart [91, 54, 11], mathematics [72, 104, 10, 81, 55, 40, 147, 106], OCR [92, 107, 9, 49, 96], and document [24]. We use the SFT versions of InternVL3-8B, 38B, and 78B to generate rollouts. During the MPO phase, all models are trained on the same dataset, which comprises about 300K samples." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.506, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.137, + 0.092, + 0.305, + 0.108 + ], + "angle": 0, + "content": "2.4 Test-Time Scaling" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.117, + 0.862, + 0.161 + ], + "angle": 0, + "content": "Test-Time Scaling has been shown to be an effective method to enhance the reasoning abilities of LLMs and MLLMs [108, 94, 87, 70, 120, 36, 152, 125]. In this work, we use the Best-of-N evaluation strategy and employ VisualPRM-8B [125] as the critic model to select the best response for reasoning and mathematics evaluation." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.166, + 0.862, + 0.25 + ], + "angle": 0, + "content": "Visual Process Reward Model. VisualPRM first assigns a quality score to each step of the given solution and then averages these scores to obtain the overall score for this solution. This process is formulated as a multi-turn chat task so that we can effectively leverage the generation ability of MLLMs. 
The image \\(I\\), question \\(q\\), and the first step \\(s_0\\) of the step-by-step solution \\(s = \\{s_0, s_1, \\dots, s_n\\} \\in S\\) to this question are included in the first turn and a new step is presented in each subsequent turn. During the training stage, the model is required to predict the correctness of the given step in each turn as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.42, + 0.257, + 0.861, + 0.275 + ], + "angle": 0, + "content": "\\[\nc _ {i} \\sim M \\left(y _ {i} \\mid I, q, s _ {\\leq i}\\right), \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.281, + 0.861, + 0.311 + ], + "angle": 0, + "content": "where \\( c_{i} \\in \\{+, -\\} \\) denotes the correctness of \\( i \\)-th step. During the inference stage, the score for each step is defined as the probability of generating \"+\"." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.316, + 0.862, + 0.359 + ], + "angle": 0, + "content": "Data. VisualPRM400K [125] is used to train VisualPRM, which is constructed based on multimodal questions collected from MMPR v1.2 [124]. Following the data pipeline in VisualPRM400K, we further expand VisualPRM400K by sampling rollouts from the 8B and 38B variants of InternVL3." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.375, + 0.28, + 0.39 + ], + "angle": 0, + "content": "2.5 Infrastructure" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.401, + 0.862, + 0.5 + ], + "angle": 0, + "content": "To facilitate model training, we extend the InternEVO framework [15]—originally designed to optimize the Zero Redundancy Optimizer (ZeRO) for large-scale LLM training—to support the training of our InternVL models. This extension enables efficient scaling to hundreds of billions of parameters across thousands of GPUs. The enhanced framework introduces flexible and decoupled sharding strategies for the ViT, MLP, and LLM components, significantly improving training efficiency by overlapping communication and computation. 
It further supports a comprehensive range of parallelism strategies—including data, tensor, sequence, and pipeline parallelism—as well as their arbitrary combinations." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.505, + 0.862, + 0.56 + ], + "angle": 0, + "content": "A key challenge in MLLM training is the imbalance in computational load caused by the varying proportions of visual and textual tokens. Such imbalances can lead to inefficiencies by overburdening either the ViT or LLM modules. To address this, we introduce a suite of techniques that dynamically balance computational workloads across modules, ensuring efficient and equitable resource utilization." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.567, + 0.862, + 0.664 + ], + "angle": 0, + "content": "For InternVL models of varying scales, the extended InternEVO framework formulates an optimization objective that identifies the optimal configuration to minimize both memory consumption and communication overhead across different module dimensions. To support sequences of up to 32K tokens, our approach incorporates both head-parallel and sequence-parallel techniques, effectively overcoming scalability bottlenecks while preserving computational efficiency. Compared to the training of InternVL2.5, the application of InternEVO in InternVL3 results in a training speedup of \\(50\\%\\) to \\(200\\%\\) for models of comparable size, given the same computational budget." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.685, + 0.279, + 0.701 + ], + "angle": 0, + "content": "3 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.716, + 0.862, + 0.843 + ], + "angle": 0, + "content": "In this section, we first compare the overall multimodal capabilities of InternVL3 with those of current advanced MLLMs using widely adopted multimodal benchmarks. 
Subsequently, we evaluate the performance of InternVL3 in various domains, including multimodal reasoning, mathematics, optical character recognition (OCR), chart and document understanding, multi-image understanding, real-world comprehension, comprehensive multimodal evaluation, multimodal hallucination evaluation, visual grounding, multimodal multilingual understanding, video understanding, and other multimodal tasks, most of which were tested using VLMEvalKit [33]. Additionally, we provide a detailed evaluation of the language capabilities of InternVL3. Finally, we analyze the advantages of several key modifications in InternVL3 compared to its predecessor, InternVL2.5, including the native multimodal pre-training, the V2PE positional encoding, and the improvements brought by the post-training technique." + }, + { + "type": "title", + "bbox": [ + 0.136, + 0.858, + 0.523, + 0.873 + ], + "angle": 0, + "content": "3.1 Overall Comparison to Other Advanced MLLMs" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.884, + 0.862, + 0.914 + ], + "angle": 0, + "content": "Figure 1 provides a detailed assessment of InternVL3's performance across a diverse set of benchmarks, including MMMU [141], MathVista [80], AI2D [57], ChartQA [91], DocVQA [93], InfographicVQA [92]," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.135, + 0.092, + 0.862, + 0.149 + ], + "angle": 0, + "content": "HallusionBench [45], OCRBench [76], and LongVideoBench [129]. Compared with previous models, InternVL3 demonstrates substantial improvements across a wide range of task categories. These advancements can be primarily attributed to enhanced training strategies, refined testing methodologies, and the expanded training corpus." 
+ }, + { + "type": "text", + "bbox": [ + 0.135, + 0.154, + 0.862, + 0.211 + ], + "angle": 0, + "content": "More specifically, InternVL3 achieves an impressive score of 72.2 on the MMMU benchmark, underscoring its superior capacity to manage complex multimodal challenges. Beyond its performance on MMMU, InternVL3 consistently outperforms earlier versions of the InternVL series on a variety of tasks, thereby emphasizing its broad applicability to real-world scenarios that require sophisticated multimodal comprehension and reasoning." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.216, + 0.862, + 0.313 + ], + "angle": 0, + "content": "In addition to surpassing its open-source counterparts, InternVL3 exhibits competitive performance relative to leading closed-source commercial models, such as ChatGPT-4o-latest [98] and Claude-3.5 Sonnet [3]. In many cases, the performance gap between InternVL3 and these proprietary models is notably narrowed—and in certain benchmarks, such as AI2D and ChartQA, InternVL3 even surpasses them. Nonetheless, our results further reveal that Gemini2.5 Pro [117] maintains a performance edge on select tasks (e.g., on HallusionBench), indicating that despite the notable progress in InternVL3, there remains room for further refinement of our InternVL series." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.331, + 0.463, + 0.347 + ], + "angle": 0, + "content": "3.2 Multimodal Reasoning and Mathematics" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.357, + 0.862, + 0.414 + ], + "angle": 0, + "content": "To comprehensively evaluate the multimodal reasoning and mathematical capabilities of InternVL3, we conduct experiments on a series of benchmarks, including MMMU [141] for multidisciplinary reasoning, MathVista [80], MathVision [119], MathVerse [146] for mathematical reasoning, as well as DynaMath [155], WeMath [99] and LogicVista [131] for complementary evaluation on logical reasoning." 
+ }, + { + "type": "text", + "bbox": [ + 0.135, + 0.419, + 0.862, + 0.558 + ], + "angle": 0, + "content": "As shown in Table 2, InternVL3 exhibits strong performance across all tested benchmarks. Specifically, on the MMMU benchmark, InternVL3-based models consistently outperform smaller-scale competitors. For instance, with increasing model size, InternVL3-78B reaches a score over 72 on MMMU, indicating robust understanding and reasoning capability in handling abstract multidisciplinary concepts. In the mathematical domain, InternVL3 demonstrates significant gains across various benchmarks. On MathVista, InternVL3-78B records a performance close to 79.0, while on MathVision and MathVerse, the results are also competitive, evidencing the model's enhanced ability to tackle challenging mathematical problems. Furthermore, performance on DynaMath, WeMath, and LogicVista consistently improves with scaling. The overall score—a mean calculated across all benchmarks—shows that InternVL3 models achieve a balanced enhancement across different aspects, surpassing many of the preceding open-source methods." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.563, + 0.862, + 0.635 + ], + "angle": 0, + "content": "A notable characteristic of InternVL3 is the efficiency of the best-of-N evaluation strategy [125]. When applying this method, even models with relatively smaller parameter sizes (e.g., InternVL3-1B and InternVL3-2B) exhibit substantial improvements in reasoning performance. Specifically, in the Vision-Only split of MathVerse, the best-of-8 strategy leads to increases of approximately 6.0 and 3.2 percentage points for InternVL3-38B and InternVL3-78B, respectively. This improvement underscores the effectiveness of test-time scaling." 
+ }, + { + "type": "title", + "bbox": [ + 0.137, + 0.651, + 0.483, + 0.667 + ], + "angle": 0, + "content": "3.3 OCR, Chart, and Document Understanding" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.677, + 0.862, + 0.761 + ], + "angle": 0, + "content": "To assess the model's integrated vision-language understanding in tasks involving text, document, and chart comprehension, we perform a comprehensive evaluation over nine benchmarks, including AI2D [57], ChartQA [91], TextVQA [107], DocVQA [93], InfoVQA [92], OCRBench [76], SEED-2-Plus [61], CharXiv [128], and VCR [148]. As illustrated in Table 3, the InternVL3 series not only maintains robust performance across these benchmarks but also demonstrates competitive or superior results when compared to other open-source and closed-source counterparts." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.767, + 0.862, + 0.852 + ], + "angle": 0, + "content": "At the 1B scale, InternVL3-1B achieves performance that is roughly on par with previous lower-scale models. At the 2B scale, InternVL3-2B not only improves its absolute scores—for instance, reaching 78.7/87.4 on AI2D and 88.3 on DocVQA—but also exhibits a performance edge over similarly parameterized models such as Qwen2-VL-2B [121]. Although its TextVQA performance (77.0) remains comparable to that of Qwen2-VL-2B, the enhancements in document and chart understanding suggest that the proposed native multimodal pre-training are particularly effective in tasks requiring precise visual-textual integration." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.856, + 0.862, + 0.913 + ], + "angle": 0, + "content": "The benefits of the new pre-training protocol become even more pronounced at larger scales. Mid-scale models like InternVL3-8B and InternVL3-9B deliver substantial gains, with InternVL3-8B achieving 85.2/92.6 on AI2D, 92.7 on DocVQA, and VCR scores of 94.5/98.1. 
Moreover, when compared with heavyweight systems such as Qwen2-VL-72B [121] or even closed-source models like GPT-4o-20240513 [97], the high-scale variants" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.167, + 0.087, + 0.833, + 0.582 + ], + "angle": 0, + "content": "
ModelMMMUMathVistaMathVisionMathVerseDynaMathWeMathLogicVistaOverall
LLaVA-OV-0.5B [60]31.434.8------
InternVL2.5-1B [18]41.247.121.116.45.611.126.024.1
InternVL3-1B43.445.818.818.75.813.429.825.1
w/ VisualPRM-Bo8 [125]55.462.121.728.913.428.534.935.0
Aquila-VL-2B [44]46.959.117.917.45.015.930.627.5
Qwen2.5-VL-3B [7]51.261.221.931.213.222.940.334.6
Ovis-2B [84]45.664.117.729.410.09.934.730.2
Ovis-4B [84]49.069.621.538.518.016.935.335.5
InternVL2.5-2B [18]43.251.114.022.34.48.027.324.3
InternVL2.5-4B [18]51.864.118.427.715.221.234.233.2
InternVL3-2B48.657.021.725.314.622.436.932.4
w/ VisualPRM-Bo8 [125]57.870.526.636.721.438.540.541.7
LLaVA-OV-7B [60]47.958.618.319.39.020.933.329.6
MiniCPM-V2.6 [135]49.860.823.418.99.816.427.529.5
MiniCPM-o2.6 [135]50.973.321.735.010.425.236.036.1
Ovis-8B [84]57.471.825.942.320.427.239.440.6
Qwen2.5-VL-8B [7]55.067.825.441.121.035.244.141.4
InternVL2.5-8B [18]56.264.517.022.89.423.536.032.8
InternVL3-8B62.771.629.339.825.537.144.144.3
w/ VisualPRM-Bo8 [125]66.075.237.546.328.548.149.750.2
InternVL3-9B57.771.527.635.326.733.849.243.1
w/ VisualPRM-Bo8 [125]63.776.233.945.829.146.650.649.4
Ovis2-16B [84]60.773.730.145.826.345.047.447.0
InternVL2.5-26B [18]60.768.223.424.011.430.939.636.9
InternVL3-14B67.175.137.244.431.343.051.249.9
w/ VisualPRM-Bo8 [125]69.377.940.147.733.152.056.253.8
Cambrian-34B [116]49.753.2------
VILA-1.5-40B [71]55.149.5------
Ovis2-34B [84]66.776.131.950.127.551.949.950.6
InternVL2.5-38B [18]63.971.932.236.920.038.347.944.4
InternVL3-38B70.175.134.248.235.348.658.452.8
w/ VisualPRM-Bo8 [125]71.079.441.854.236.155.258.456.6
GPT-4o-20241120 [97]70.760.031.240.634.545.852.847.9
Claude-3.7-Sonnet [3]75.066.841.946.739.749.358.253.9
Gemini-2.0-Flash [30]72.670.443.647.842.147.452.353.7
Gemini-2.0-Pro [29]69.971.348.167.343.356.553.258.5
LLaVA-OV-72B [60]55.767.125.327.215.632.040.937.7
QvQ-72B-Preview [115]70.370.334.948.230.739.058.250.2
Qwen2.5-VL-72B [7]68.274.239.347.335.949.155.752.8
InternVL2.5-78B [18]70.072.332.239.219.239.849.046.0
InternVL3-78B72.279.043.151.035.146.155.954.6
w/ VisualPRM-Bo8 [125]72.280.540.854.237.352.457.956.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.136, + 0.588, + 0.865, + 0.675 + ], + "angle": 0, + "content": "Table 2: Comparison of multimodal reasoning and mathematical performance. MMMU [141] is a multidisciplinary reasoning benchmark. MathVista [80], MathVision [119], MathVerse [146], DynaMath [155], and WeMath [99] are mathematics benchmarks. For MathVerse, we report the performance on Vision-Only split. LogicVista [131] is a logical reasoning benchmark. Part of the results are collected from the OpenCompass leaderboard [26]. The overall score is the average score of the above benchmarks. \"w/ VisualPRM-Bo8\" denotes that the model is evaluated with Best-of-8 settings, where VisualPRM [125] serves as the critic model." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.703, + 0.865, + 0.748 + ], + "angle": 0, + "content": "of InternVL3—particularly InternVL3-38B and InternVL3-78B—push the envelope further. For instance, InternVL3-78B attains a remarkable OCRBench score of 906 and VCR scores of 96.0/98.6, clearly surpassing the corresponding metrics of comparable models." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.767, + 0.378, + 0.783 + ], + "angle": 0, + "content": "3.4 Multi-Image Understanding" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.794, + 0.862, + 0.865 + ], + "angle": 0, + "content": "we evaluate the multi-image relation perception and understanding capabilities of InternVL3 across a suite of widely recognized benchmarks, including BLINK [39], Mantis-Eval [51], MMIU [95], MuirBench [118], MMT-Bench [137], and MIRB [153], as presented in Table 4. These benchmarks comprehensively assess skills such as cross-image reasoning and context integration, all of which are crucial for effective multimodal interaction." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.87, + 0.865, + 0.915 + ], + "angle": 0, + "content": "InternVL3 consistently outperforms its earlier counterparts across different parameter scales. 
For instance, at the 1B scale, InternVL3-1B exhibits a modest yet consistent improvement over preceding models, achieving a BLINK score of 42.9 and an MMT-Bench score of 52.9. The performance gains become even more pronounced" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.139, + 0.088, + 0.86, + 0.621 + ], + "angle": 0, + "content": "
Model NameAI2D (w / wo M)ChartQA (test avg)TextVQA (val)DocVQA (test)InfoVQA (test)OCR BenchSEED-2 PlusCharXiv (RQ / DQ)VCR-EN-Easy (EM / Jaccard)Overall
LLaVA-OneVision-0.5B [60]57.1 / -61.4-70.041.8565----
InternVL2-1B [19]64.1 / 70.572.970.581.750.975454.318.1 / 30.721.5 / 48.454.9
InternVL2.5-1B [18]69.3 / 77.875.972.084.856.078559.019.0 / 38.491.5 / 97.068.3
InternVL3-1B69.4 / 78.375.374.181.953.779058.221.0 / 47.189.3 / 96.268.6
Qwen2-VL-2B [121]74.7 / 84.673.579.790.165.580962.4-81.5 / --
Qwen2.5-VL-3B [7]81.6 / -84.079.393.977.179767.631.3 / 58.6--
Aquila-VL-2B [44]75.0 / -76.576.485.058.377263.0-70.0 / --
InternVL2-2B [19]74.1 / 82.376.273.486.958.978460.021.0 / 40.632.9 / 59.262.0
InternVL2.5-2B [18]74.9 / 83.579.274.388.760.980460.921.3 / 49.793.2 / 97.672.1
InternVL3-2B78.7 / 87.480.277.088.366.183564.628.3 / 54.791.2 / 96.974.7
Ovis1.6-Gemma2-9B [84]84.4 / -----830----
MiniCPM-V2.6 [135]82.1 / -82.480.190.8-85265.731.0 / 57.173.9 / 85.7-
Molmo-7B-D [31]- / 93.284.181.792.272.6694----
Qwen2-VL-7B [121]83.0 / 92.183.084.394.576.586669.0-89.7 / 93.8-
Qwen2.5-VL-7B [7]83.9 / -87.384.995.782.686470.442.5/73.9--
InternVL2-8B [19]83.8 / 91.783.377.491.674.879467.531.2 / 56.137.9 / 61.569.7
InternVL2.5-8B [18]84.5 / 92.884.879.193.077.682269.732.9 / 68.692.6 / 97.479.6
InternVL3-8B85.2 / 92.686.680.292.776.888069.737.6 / 73.694.5 / 98.181.3
InternVL3-9B84.6 / 92.986.279.493.679.687768.838.0 / 72.594.2 / 97.981.3
InternVL3-14B86.0 / 93.787.380.594.183.687570.343.1 / 82.294.8 / 98.283.4
InternVL-Chat-V1.5 [19]80.7 / 89.883.880.690.972.572466.329.2 / 58.514.7 / 51.465.9
InternVL2-26B [19]84.5 / 92.584.982.392.975.982567.633.4 / 62.474.5 / 86.776.7
InternVL2.5-26B [18]86.4 / 94.487.282.494.079.885270.835.9 / 73.594.4 / 98.081.8
Qwen2.5-VL-32B [7]---94.883.4-----
Cambrian-34B [116]79.5 / -75.676.775.546.0600-27.3 / 59.779.7 / 89.3-
VILA-1.5-40B [71]69.9 / -67.273.6--460-24.0 / 38.7--
InternVL2-40B [19]86.6 / 94.586.283.093.978.783769.232.3 / 66.084.7 / 92.679.3
InternVL2.5-38B [18]87.6 / 95.188.282.795.383.684271.242.4 / 79.694.7 / 98.283.6
InternVL3-38B88.9 / 95.589.283.995.485.088671.646.4 / 87.296.1 / 98.785.5
GPT-4V [97]78.2 / 89.478.578.088.475.164553.837.1 / 79.952.0 / 65.470.0
GPT-4o-20240513 [97]84.6 / 94.285.777.492.879.273672.047.1 / 84.591.6 / 96.481.6
Claude-3-Opus [3]70.6 / 88.180.867.589.355.669444.230.2 / 71.662.0 / 77.767.3
Claude-3.5-Sonnet [3]81.2 / 94.790.874.195.274.378871.760.2 / 84.363.9 / 74.778.7
Gemini-1.5-Pro [102]79.1 / 94.487.278.893.181.0754-43.3 / 72.062.7 / 77.7-
LLaVA-OneVision-72B [60]85.6 / -83.780.591.374.9741----
NVLM-D-72B [28]85.2 / 94.286.082.192.6-853----
Molmo-72B [31]- / 96.387.383.193.581.9-----
Qwen2-VL-72B [121]88.1 / -88.385.596.584.5877--91.3 / 94.6-
Qwen2.5-VL-72B [7]88.7 / -89.583.596.487.388573.049.7 / 87.4--
InternVL2-Llama3-76B [19]87.6 / 94.888.484.494.182.083969.738.9 / 75.283.2 / 91.381.1
InternVL2.5-78B [18]89.1 / 95.788.383.495.184.185471.342.4 / 82.395.7 / 94.583.9
InternVL3-78B89.7 / 96.089.784.395.486.590671.946.0 / 85.196.0 / 98.685.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.135, + 0.628, + 0.861, + 0.685 + ], + "angle": 0, + "content": "Table 3: Comparison of OCR, chart, and document understanding performance. We evaluate OCR-related capabilities across 9 benchmarks, including AI2D [57], ChartQA [91], TextVQA [107], DocVQA [93], InfoVQA [92], OCRBench [76], SEED-2-Plus [61], CharXiv [128], and VCR [148]. Part of results are collected from [34, 31, 3, 128, 148] and the OpenCompass leaderboard [26]." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.725, + 0.861, + 0.782 + ], + "angle": 0, + "content": "at the 2B scale; InternVL3-2B attains a remarkable 65.9 on Mantis-Eval, representing an improvement of over 11 points relative to InternVL2.5-2B, and also boosts its MMT-Bench performance to 59.5. Such enhancements indicate that the advanced pre-training strategies and enhanced training datasets in InternVL3 significantly elevate its capability to capture and reason over inter-image relationships." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.788, + 0.862, + 0.913 + ], + "angle": 0, + "content": "At higher scales, the trend continues. InternVL3-8B and its subsequent larger variants not only secure steady improvements on BLINK and MMT-Bench but also demonstrate substantial gains on the MIRB and MuirBench benchmarks. In particular, InternVL3-78B reaches a BLINK score of 66.3 and an MMT-Bench score of 73.2, positioning it as a competitive alternative to leading closed-source models like GPT-4o. These results suggest that the learning multimodal capabilities via native multimodal pre-training and the scaling of model parameters are key contributors to the elevated performance observed across diverse evaluation settings. 
Despite these encouraging outcomes, a noticeable performance gap between our InternVL3 and other MLLMs like Qwen2.5-VL still exists on certain benchmarks, such as MuirBench, implying that future work may benefit from further enhancements in training data curation and additional model refinements." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.139, + 0.088, + 0.86, + 0.525 + ], + "angle": 0, + "content": "
Model NameBLINK (val)Mantis EvalMMIUMuir BenchMMT (val)MIRB (avg)OverallRealWorld QAMME-RW (EN)WildVision (win rate)R-Bench (dis)Overall
LLaVA-OneVision-0.5B [60]52.139.6-25.5---55.6----
InternVL2-1B [19]38.646.137.329.349.531.538.750.340.217.855.641.0
InternVL2.5-1B [18]42.051.238.529.950.335.641.357.544.243.459.051.0
InternVL3-1B42.950.239.331.252.936.142.158.246.043.860.452.1
Qwen2-VL-2B [121]44.4---55.1--62.6----
Qwen2.5-VL-3B [6]47.6--47.7---65.453.1---
InternVL2-2B [19]43.848.439.832.550.432.141.257.347.331.856.848.3
InternVL2.5-2B [18]44.054.843.540.654.536.445.660.148.844.262.253.8
InternVL3-2B50.365.943.038.859.542.950.164.353.848.867.558.6
Qwen2-VL-7B [121]53.2---64.0--70.156.5-64.0-
Qwen2.5-VL-7B [6]56.4--59.6---68.557.4---
MiniCPM-V2.6 [135]53.069.0--60.8--65.0----
InternVL2-8B [19]50.965.442.048.760.050.052.864.453.554.467.960.1
InternVL2.5-8B [18]54.867.746.751.162.352.555.970.159.162.070.165.3
InternVL3-8B55.570.146.855.065.056.858.270.862.069.874.169.2
InternVL3-9B58.670.150.451.465.458.659.170.561.363.870.366.5
InternVL3-14B60.376.050.956.270.359.362.270.764.069.869.368.5
InternVL-Chat-V1.5 [19]46.666.837.438.558.050.349.666.049.456.667.960.0
InternVL2-26B [19]56.269.642.650.660.653.755.668.358.762.270.164.8
InternVL2.5-26B [18]61.875.649.461.166.955.761.874.561.865.272.968.6
Cambrian-34B [116]-------67.844.1---
InternVL2-40B [19]57.271.447.954.466.255.258.771.861.863.273.367.5
InternVL2.5-38B [18]63.278.355.362.770.061.265.173.564.066.472.169.0
InternVL3-38B64.077.957.463.871.862.366.275.667.371.673.372.0
GPT-4V [97]54.662.7-62.364.353.1-61.4-71.865.6-
GPT-4o-20240513 [97]68.0-55.768.065.4--75.445.280.677.769.7
Claude-3.5-Sonnet [3]--53.4----60.151.6---
Gemini-1.5-Pro [102]--53.4-64.5--67.538.2---
LLaVA-OneVision-72B [60]55.477.6-54.8---71.9----
Qwen2-VL-72B [121]----71.8--77.8----
Qwen2.5-VL-72B [6]64.4--70.7---75.763.2---
InternVL2-Llama3-76B [19]56.873.744.251.267.458.258.672.263.065.874.168.8
InternVL2.5-78B [18]63.877.055.863.570.861.165.378.762.971.477.272.6
InternVL3-78B66.379.360.464.573.264.368.078.065.473.677.473.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.136, + 0.532, + 0.865, + 0.591 + ], + "angle": 0, + "content": "Table 4: Comparison of multi-image and real-world understanding performance. Multi-image benchmarks include BLINK [39], Mantis-Eval [51], MMIU [95], MuirBench [118], MMT-Bench [137], and MIRB [153]. Real-world benchmarks encompass RealWorldQA [27], MME-RealWorld [151], WildVision [86], and R-Bench [62]. Part of the results are sourced from the benchmark papers and the OpenCompass leaderboard [26]." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.645, + 0.377, + 0.661 + ], + "angle": 0, + "content": "3.5 Real-World Comprehension" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.684, + 0.862, + 0.809 + ], + "angle": 0, + "content": "We evaluate the InternVL3 series on four real-world comprehension benchmarks—RealWorldQA [27], MME-RealWorld [151], WildVision [86], and R-Bench [62]—to assess its ability to tackle realistic and complex tasks. As shown in Table 4, even the smallest variant in the InternVL3 family (InternVL3-1B) demonstrates promising performance with a RealWorldQA score of 58.2, an MME-RealWorld score of 46.0, a WildVision win rate of 43.8, and an R-Bench score of 60.4. Scaling up the model yields further enhancements across all metrics. Mid-sized variants such as InternVL3-8B and InternVL3-14B continue this positive trend, with InternVL3-8B reporting a RealWorldQA score of 70.8 and an R-Bench score of 74.1. These improvements highlight the effectiveness of scaling, as larger models provide more robust representations and enhanced comprehension capabilities in real-world scenarios." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.814, + 0.862, + 0.913 + ], + "angle": 0, + "content": "At the higher end of the scale, the InternVL3-38B and InternVL3-78B models achieve top-tier results among the InternVL3 series. 
Notably, InternVL3-78B records a RealWorldQA score of 78.0, an MME-RealWorld score of 65.4, a WildVision win rate of 73.6, and an R-Bench score of 77.4. When compared with competitive models, such as GPT-4o [97]—which scores 75.4 on RealWorldQA and 80.6 on WildVision—the InternVL3 series exhibits competitive strengths. InternVL3-78B not only surpasses GPT-4o on RealWorldQA and closely matches its R-Bench performance but also considerably outperforms it on MME-RealWorld, indicating an overall robust performance on tasks demanding both perceptual precision and comprehensive understanding." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.139, + 0.088, + 0.86, + 0.537 + ], + "angle": 0, + "content": "
Model NameMME (sum)MMB (EN / CN)MMBv1.1 (EN)MMVet (turbo)MMVet2 (0613)MMStarOverallHallBench (avg)MMHal (score)CRPE (relation)POPE (avg)Overall
LLaVA-OneVision-0.5B [60]1438.061.6 / 55.559.632.2-37.7-27.9----
InternVL2-1B [19]1794.465.4 / 60.761.632.736.145.751.734.02.2557.587.345.3
InternVL2.5-1B [18]1950.570.7 / 66.368.448.843.250.158.939.02.4960.989.948.1
InternVL3-1B1934.472.6 / 67.969.959.547.551.561.941.42.5964.090.749.7
Qwen2-VL-2B [121]1872.074.9 / 73.572.249.5-48.0-41.7----
Qwen2.5-VL-3B [6]215779.1 / 78.177.461.8-55.9-46.3-73.6--
InternVL2-2B [19]1876.873.2 / 70.970.239.539.650.158.037.92.5266.388.348.8
InternVL2.5-2B [18]2138.274.7 / 71.972.260.852.353.765.342.62.9470.290.651.6
InternVL3-2B2221.281.1 / 78.478.662.253.960.769.842.53.2671.589.651.7
Qwen2-VL-7B [121]2326.883.0 / 80.580.762.0-60.7-50.63.4074.488.154.1
Qwen2.5-VL-7B [6]234783.5 / 83.482.667.1-63.9-52.9-76.4--
MiniCPM-V2.6 [135]2348.481.5 / 79.378.060.0-57.5-48.13.6075.287.353.6
InternVL2-8B [19]2210.381.7 / 81.279.554.252.362.069.245.23.3375.886.952.8
InternVL2.5-8B [18]2344.184.6 / 82.683.262.858.162.873.250.13.6578.490.655.7
InternVL3-8B2415.483.4 / 82.281.781.366.368.277.749.93.6176.391.155.2
InternVL3-9B2372.883.4 / 82.281.776.265.466.376.351.23.4775.090.455.0
InternVL3-14B2478.385.6 / 84.183.580.268.468.879.055.13.4977.390.256.5
InternVL-Chat-V1.5 [19]2194.282.2 / 82.080.361.551.557.369.750.33.1175.488.454.3
InternVL2-26B [19]2260.783.4 / 82.081.562.157.261.271.850.73.5575.688.054.5
InternVL2.5-26B [18]2373.385.4 / 85.584.265.060.866.575.255.03.7079.190.657.1
Cambrian-34B [116]-80.4 / 79.278.353.2-54.2-41.6----
InternVL2-40B [19]2307.586.8 / 86.585.165.563.865.475.756.93.7577.688.456.7
InternVL2.5-38B [18]2455.886.5 / 86.385.568.862.167.977.056.83.7178.390.757.4
InternVL3-38B2523.687.6 / 86.886.983.969.671.581.557.13.7777.190.657.1
GPT-4V [97]1926.681.0 / 80.280.067.566.356.070.746.5----
GPT-4o-20240513 [97]-83.4 / 82.183.169.171.064.7-55.04.0076.686.955.6
Claude-3-Opus [3]1586.863.3 / 59.260.151.755.845.755.537.8----
Claude-3.5-Sonnet [3]-82.6 / 83.580.970.171.865.1-55.5----
Gemini-1.5-Pro [102]-73.9 / 73.874.664.066.959.1-45.6----
LLaVA-OneVision-72B [60]2261.085.8 / 85.385.060.6-65.8-49.0----
Qwen2-VL-72B [121]2482.786.5 / 86.685.974.066.968.378.758.1----
Qwen2.5-VL-72B [6]2448.088.6 / 87.988.476.2-70.8-55.2-79.2--
InternVL2-Llama3-76B [19]2414.786.5 / 86.385.565.768.467.477.255.23.8377.689.056.4
InternVL2.5-78B [18]2494.588.3 / 88.587.472.365.569.579.257.43.8978.890.857.7
InternVL3-78B2549.889.0 / 88.787.781.370.072.582.059.13.8579.290.358.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.135, + 0.543, + 0.861, + 0.601 + ], + "angle": 0, + "content": "Table 5: Comparison of comprehensive multimodal understanding and hallucination performance. Comprehensive multimodal benchmarks include MME [37], MMBench series [75], MMVet series [138, 139], and MMStar [13]. Hallucination benchmarks encompass HallusionBench [45], MMHal [111], CRPE [126], and POPE [67]. Part of the results are sourced from the benchmark papers and the OpenCompass leaderboard [26]." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.667, + 0.454, + 0.683 + ], + "angle": 0, + "content": "3.6 Comprehensive Multimodal Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.711, + 0.862, + 0.809 + ], + "angle": 0, + "content": "The comprehensive multimodal evaluation is based on established benchmarks including MME [37], MMBench (evaluating both English and Chinese tasks) [75], MMBench v1.1 (English) [75], MMVet [138], MMVet v2 [139], and MMStar [13], as summarized in Table 5. Specifically, InternVL3-1B achieves an MMBench score of 72.6/67.9 (English/Chinese) and improves the MMBench v1.1 score to 69.9, compared to the InternVL2.5-1B baseline (70.7/66.3 and 68.4, respectively). The improvements become more pronounced at the 2B scale, where InternVL3-2B records an MME of 2221.2 and reaches an MMBench performance of 81.1/78.4, along with an MMBench v1.1 score of 78.6." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.814, + 0.865, + 0.913 + ], + "angle": 0, + "content": "At larger scales, InternVL3 models consistently demonstrate superior performance. For example, the InternVL3-8B model achieves an MME of 2415.4, while the InternVL3-38B and InternVL3-78B models record MME scores of 2523.6 and 2549.8, respectively. The corresponding MMBench and MMBench v1.1 scores also show steady improvements, with InternVL3-78B attaining 89.0/88.7 for English/Chinese and 87.7 for English-only tasks. 
When compared with other competitive models, such as Qwen2-VL-72B and Qwen2.5-VL-72B, the InternVL3 series—especially the 78B variant—offers a consistent performance advantage on the multimodal understanding benchmarks." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.16, + 0.088, + 0.835, + 0.404 + ], + "angle": 0, + "content": "
Model NameRefCOCORefCOCO+RefCOCOg
valtest-Atest-Bvaltest-Atest-Bvaltest
Grounding-DINO-L [74]90.693.288.282.889.075.986.187.086.6
UNINEXT-H [133]92.694.391.585.289.679.888.789.488.9
ONE-PEACE [122]92.694.289.388.892.283.289.289.389.8
Qwen2.5-VL-3B [6]89.191.784.082.488.074.185.285.785.0
InternVL3-1B85.890.181.776.684.169.282.882.681.6
InternVL3-2B89.892.686.484.089.276.587.687.286.7
Shikra-7B [12]87.090.680.281.687.472.182.382.282.9
Ferret-v2-13B [144]92.695.088.987.492.181.489.490.089.6
CogVLM-Grounding [123]92.894.889.088.792.983.489.890.890.3
MM1.5 [143]-92.586.7-88.777.8-87.1-
Qwen2-VL-7B [121]91.793.687.385.890.579.587.387.887.9
Qwen2.5-VL-7B [6]90.092.585.484.289.176.987.287.286.6
TextHawk2 [140]91.993.087.686.290.080.488.288.188.2
InternVL2-8B [19]87.191.180.779.887.971.482.782.782.9
InternVL2.5-8B [18]90.394.585.985.291.578.886.787.687.6
InternVL3-8B92.594.688.088.292.581.889.690.089.6
InternVL3-9B91.893.286.686.491.079.988.088.588.2
InternVL3-14B92.094.487.887.492.181.588.689.389.1
Qwen2-VL-72B [121]93.295.390.790.193.885.689.990.491.1
Qwen2.5-VL-72B [6]92.794.689.788.992.283.789.990.390.3
InternVL2-Llama3-76B [19]92.294.888.488.893.182.889.590.390.0
InternVL2.5-78B [18]93.795.692.590.494.786.992.792.292.3
InternVL3-38B93.295.190.289.893.285.291.491.591.2
InternVL3-78B93.495.490.390.193.885.391.591.591.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.136, + 0.41, + 0.86, + 0.44 + ], + "angle": 0, + "content": "Table 6: Comparison of visual grounding performance. We evaluate InternVL's visual grounding capability on RefCOCO, RefCOCO+, and RefCOCOg datasets [56, 88]. Parts of the results are collected from [121]." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.467, + 0.441, + 0.482 + ], + "angle": 0, + "content": "3.7 Multimodal Hallucination Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.493, + 0.862, + 0.606 + ], + "angle": 0, + "content": "We evaluate InternVL's propensity for hallucinations on four established benchmarks—HallusionBench [45], MMHal-Bench [111], CRPE [126], and POPE [67]—as detailed in Table 5. In comparison with previous InternVL series, the new InternVL3 models demonstrate overall competitive performance across varying scales, while providing consistent improvements in handling multimodal hallucination challenges. In the small-parameter regime, InternVL3-1B attains a HallusionBench score of 41.4, representing an appreciable gain over the InternVL2.5-1B baseline, which scored 39.0. Similarly, the 2B variant of InternVL3 shows a comparable HallusionBench performance (42.5) to its InternVL2.5 counterpart (42.6), while registering a modest improvement in CRPE performance (71.5 vs. 70.2)." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.61, + 0.862, + 0.696 + ], + "angle": 0, + "content": "In the large-scale setting, InternVL3-38B and InternVL3-78B are particularly noteworthy. InternVL3-38B obtains a HallusionBench score of 57.1, while InternVL3-78B reaches 59.1, accompanied by a CRPE improvement to 79.2. These figures position the InternVL3 series as competitive with leading closed- and open-source models such as GPT-4o and the Qwen2.5-VL series. 
Despite these advancements, minor declines on certain benchmarks, such as MMHal, indicate that although the InternVL3 series has made overall progress, optimizing data and training strategies to achieve more consistent improvements remains an important direction for future work." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.713, + 0.304, + 0.729 + ], + "angle": 0, + "content": "3.8 Visual Grounding" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.739, + 0.861, + 0.797 + ], + "angle": 0, + "content": "We evaluate InternVL's visual grounding capability on the RefCOCO [56], RefCOCO+[56], and RefCOCOg[88] datasets, where the model is tasked with accurately localizing target objects in images from given textual descriptions. Table 6 shows a comprehensive comparison across various models, including several specialized grounding models as well as multiple MLLLMs." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.801, + 0.862, + 0.913 + ], + "angle": 0, + "content": "Among the smaller-scale models, we observe that while Qwen2.5-VL-3B achieves an average score of 85.0, the InternVL3-1B and InternVL3-2B models yield average scores of 81.6 and 86.7, respectively. Notably, when scaling up, the InternVL3 series exhibits promising improvements. InternVL3-8B, InternVL3-9B, and InternVL3-14B yield average scores around 88.2–89.6, reflecting a consistent trend of performance gains as the model size increases. However, when reaching larger scales, the performance gains appear to plateau. For instance, InternVL2.5-78B reaches an average score of 92.3, and InternVL3-78B only shows a score of 91.4. We speculate that this is because InternVL3's training data expansion does not include additional grounding-specific data and the relative reduction in grounding-targeted data could have restricted the localization capabilities." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.139, + 0.089, + 0.86, + 0.476 + ], + "angle": 0, + "content": "
Model NameMMMBMultilingual MMBenchMTVQA (avg)Overall
enzhptartrruenzhptartrru
InternVL2-1B [19]73.267.455.553.543.855.267.961.250.843.331.852.712.640.7
InternVL2.5-1B [18]78.870.261.555.045.361.172.564.757.043.037.853.221.446.0
InternVL3-1B79.470.162.358.047.661.972.666.262.348.039.560.322.247.9
Qwen2-VL-2B [121]78.374.272.668.361.872.872.171.169.961.154.469.320.052.6
Qwen2.5-VL-3B [6]------------24.8-
InternVL2-2B [19]79.471.654.043.546.448.173.869.651.429.831.342.310.939.3
InternVL2.5-2B [18]81.474.458.248.346.453.276.571.655.937.333.944.821.845.2
InternVL3-2B81.978.375.468.662.974.681.377.875.966.459.570.726.757.4
mPLUG-Owl2 [136]67.361.059.745.845.462.666.259.458.237.947.760.4--
Qwen2-VL-7B [121]83.982.481.279.074.782.481.881.679.175.674.579.325.661.6
Qwen2.5-VL-7B [6]------------29.2-
InternVL2-8B [19]83.481.576.166.369.275.782.981.876.060.566.074.420.956.6
InternVL2.5-8B [18]84.383.178.669.371.579.583.883.279.464.367.877.327.660.4
InternVL3-8B85.183.182.581.676.283.485.585.683.279.275.982.630.264.7
InternVL3-9B84.883.780.669.968.580.886.585.279.164.368.379.127.160.7
InternVL3-14B85.784.783.183.779.383.686.785.883.281.180.783.831.666.2
InternVL-Chat-V1.5 [19]82.680.876.365.268.674.081.180.276.956.266.771.020.555.7
InternVL2-26B [19]83.881.778.068.869.376.382.781.877.861.969.674.417.756.2
InternVL2.5-26B [18]86.283.881.673.373.782.886.185.580.767.575.079.628.562.6
InternVL2-40B [19]85.384.181.170.374.281.486.285.882.864.074.281.820.659.7
InternVL2.5-38B [18]86.485.184.184.382.884.987.588.685.384.584.085.931.767.4
InternVL3-38B86.785.684.584.882.685.189.089.387.184.684.387.432.468.1
GPT-4V [97]75.074.271.573.569.073.177.674.472.572.370.574.822.056.1
GPT-4o [97]------------27.8-
Gemini-1.0-Pro [114]75.071.970.669.969.672.773.672.170.361.169.870.5--
Qwen2-VL-72B [121]86.885.385.284.884.285.386.987.285.883.584.485.330.967.2
Qwen2.5-VL-72B [6]------------31.7-
InternVL2-Llama3-76B [19]85.385.182.882.883.083.787.887.385.983.185.085.722.063.9
InternVL2.5-78B [18]86.385.685.184.883.185.490.089.787.483.384.986.331.968.0
InternVL3-78B87.286.685.586.584.686.189.490.388.786.186.688.132.568.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.136, + 0.484, + 0.861, + 0.527 + ], + "angle": 0, + "content": "Table 7: Comparison of multimodal multilingual performance. We evaluate multilingual capabilities across 3 benchmarks, including MMMB [109], Multilingual MMBench [109] and MTVQA [113]. The languages evaluated are English (en), Chinese (zh), Portuguese (pt), Arabic (ar), Turkish (tr), and Russian (ru)." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.563, + 0.46, + 0.578 + ], + "angle": 0, + "content": "3.9 Multimodal Multilingual Understanding" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.592, + 0.862, + 0.663 + ], + "angle": 0, + "content": "We assess InternVL's multimodal multilingual understanding capabilities using benchmarks—MMMB, Multilingual MMBench [109], and MTVQA [113]—as shown in Table 7. The InternVL3 series demonstrates consistent improvements in multilingual performance compared to previous predecessors. For example, the lightweight InternVL3-1B already shows a modest improvement over InternVL2.5-1B, while the larger-scale variants, such as InternVL3-38B and InternVL3-78B, achieve significantly higher average scores across all three benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.668, + 0.862, + 0.739 + ], + "angle": 0, + "content": "Comparisons with other leading models further highlight the effectiveness of the InternVL3 series. Notably, the InternVL3 variants achieve performance that is competitive with or superior to models such as Qwen2-VL-72B [121] and Qwen2.5-VL-72B [6]. Overall, the enhanced performance of the InternVL3 series across MMMB, Multilingual MMBench, and MTVQA underscores the promise of our approach in advancing global multimodal applications." 
+ }, + { + "type": "title", + "bbox": [ + 0.137, + 0.764, + 0.336, + 0.78 + ], + "angle": 0, + "content": "3.10 Video Understanding" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.794, + 0.862, + 0.85 + ], + "angle": 0, + "content": "Video understanding is essential for evaluating how well MLLMs capture temporal and multimodal cues in complex video content. In this work, we assess the InternVL3 series on six established benchmarks—Video-MME [38], MVBench [65], MMBench-Video [35], MLVU [154], LongVideoBench [129], and CG-Bench [2], as detailed in Table 8." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.856, + 0.861, + 0.913 + ], + "angle": 0, + "content": "Overall, the InternVL3 models demonstrate clear performance improvements and a strong scalability trend over their predecessors. As the model capacity increases, the performance gains become more pronounced. For instance, InternVL3-2B records higher Video-MME scores (58.9/61.4) and improved MVBench and MLVU performance compared to the earlier 2B variants." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.142, + 0.088, + 0.855, + 0.562 + ], + "angle": 0, + "content": "
Model NameVideo-MME (wo / w sub)MVBenchMMBench-Video (val)MLVU (M-Avg)LongVideoBench (val total)CG-Bench (long / clue acc.)Overall
InternVL2-1B [19]42.9 / 45.457.51.1451.643.3--
InternVL2.5-1B [18]50.3 / 52.364.31.3657.347.9--
InternVL3-1B51.0 / 53.063.11.353.048.124.8 / 39.146.9
Qwen2-VL-2B [121]55.6 / 60.463.2-----
Qwen2.5-VL-3B [7]61.5 / 67.667.01.6368.243.3--
InternVL2-2B [19]46.2 / 49.160.21.3054.346.0--
InternVL2.5-2B [18]51.9 / 54.168.81.4461.452.0--
InternVL3-2B58.9 / 61.470.41.4264.255.430.8 / 50.754.9
VideoChat2-HD [64]45.3 / 55.762.31.2247.9---
MiniCPM-V-2.6 [135]60.9 / 63.6-1.70-54.9--
LLaVA-OneVision-7B [60]58.2 / -56.7-----
Qwen2-VL-7B [121]63.3 / 69.067.01.44-55.6--
Qwen2.5-VL-7B [7]65.1 / 71.669.61.7970.245.3--
InternVL2-8B [19]56.3 / 59.365.81.5764.054.6--
InternVL2.5-8B [18]64.2 / 66.972.01.6868.960.0--
InternVL3-8B66.3 / 68.975.41.6971.458.838.6 / 55.261.4
InternVL3-9B66.7 / 68.974.31.6970.862.541.1 / 58.062.3
InternVL3-14B70.4 / 73.076.61.7373.363.944.1 / 60.664.9
InternVL2-26B [19]57.0 / 60.267.51.6764.256.1--
InternVL2.5-26B66.9 / 69.275.21.8672.359.9--
Oryx-1.5-32B [78]67.3 / 74.970.11.5272.3---
Qwen2.5-VL-32B [7]70.5 / 77.9-1.93----
VILA-1.5-40B [71]60.1 / 61.1-1.6156.7---
InternVL2-40B [19]66.1 / 68.672.01.7871.060.6--
InternVL2.5-38B [18]70.7 / 73.174.41.8275.363.3--
InternVL3-38B72.7 / 75.076.91.8177.867.346.9 / 62.867.5
GPT-4V/4T [1]59.9 / 63.343.71.5349.259.1--
GPT-4o-20240513 [97]71.9 / 77.2-1.6364.666.7--
GPT-4o-20240806 [97]--1.87--41.8 / 58.3-
Gemini-1.5-Pro [102]75.0 / 81.3-1.30-64.040.1 / 56.4-
VideoLLaMA2-72B [23]61.4 / 63.162.0-----
LLaVA-OneVision-72B [60]66.2 / 69.559.4-66.461.3--
Qwen2-VL-72B [121]71.2 / 77.873.61.70--41.3 / 56.2-
Qwen2.5-VL-72B [7]73.3 / 79.170.42.0274.660.7--
InternVL2-Llama3-76B [19]64.7 / 67.869.61.7169.961.1--
InternVL2.5-78B [18]72.1 / 74.076.41.9775.763.642.2 / 58.566.0
InternVL3-78B72.7 / 75.778.71.8179.565.748.4 / 65.368.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.135, + 0.567, + 0.861, + 0.625 + ], + "angle": 0, + "content": "Table 8: Comparison of video understanding performance. We evaluate InternVL's video understanding capabilities across 6 benchmarks. For Video-MME [38], MMBench-Video [35], MLVU [154], and LongVideoBench [129], we test with four different settings: 16, 32, 48, and 64 frames, and report the maximum results. For MVBench [65], we conduct testing using 16 frames. For CG-Bench [2], we use 32 frames." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.652, + 0.861, + 0.723 + ], + "angle": 0, + "content": "The scaling behavior of the InternVL3 series is further evident in the larger models. InternVL3-14B attains a Video-MME score of 70.4/73.0, while InternVL3-38B and InternVL3-78B push these metrics even higher, reaching scores of 72.7/75.0 and 72.7/75.7, respectively. Additionally, the inclusion of CG-Bench evaluations for the InternVL3 series provides further insight into long-range video reasoning, with performance steadily improving as model size increases—for example, InternVL3-78B attains 48.4/65.3 on CG-Bench." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.728, + 0.861, + 0.812 + ], + "angle": 0, + "content": "When compared with other open-source models, the InternVL3 series demonstrates competitive advantages. For instance, while Qwen2.5-VL models achieve impressive results (with Qwen2.5-VL-72B scoring 73.3/79.1 on Video-MME), the InternVL3 series tends to outperform them in other metrics, such as MVBench and MLVU. Similarly, while closed-source systems like Gemini-1.5-Pro sometimes yield superior results on select benchmarks (e.g., Video-MME), the overall performance of InternVL3, especially at larger scales, is highly competitive." 
+ }, + { + "type": "title", + "bbox": [ + 0.137, + 0.83, + 0.3, + 0.845 + ], + "angle": 0, + "content": "3.11 GUI Grounding" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.856, + 0.862, + 0.913 + ], + "angle": 0, + "content": "GUI grounding requires precise localization and understanding of interface elements, which is critical for applications like automated UI testing and assistive technologies. In Table 9, we report the performance on GUI grounding benchmarks, comparing InternVL3 with state-of-the-art multimodal and GUI-specific models. The results demonstrate that InternVL3 achieves competitive performance across different scales. On" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.141, + 0.09, + 0.855, + 0.145 + ], + "angle": 0, + "content": "
MethodGPT-4oGemini 2.0ClaudeAguvis-72BQwen2.5-VL-72BUI-TARS-72BInternVL3-8B-38B-72B
ScreenSpot18.184.083.089.287.188.479.585.688.7
ScreenSpot-V2-----90.381.488.390.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.198, + 0.15, + 0.799, + 0.166 + ], + "angle": 0, + "content": "Table 9: Performance of InternVL3 and other models on GUI grounding benchmarks." + }, + { + "type": "table", + "bbox": [ + 0.14, + 0.179, + 0.855, + 0.327 + ], + "angle": 0, + "content": "
Model NameObj.countAbs.Dist.Obj.sizeRoom SizeRel.Dist.Rel.Dir.Route PlanAppr.OrderOverall
GPT-4o [97]46.25.343.838.237.041.331.528.534.0
Gemini-1.5 Pro [102]56.230.964.143.651.346.336.034.645.4
VILA-1.5-8B [71]17.421.850.318.832.134.831.024.828.9
LongVA-7B [145]38.016.638.922.233.143.325.415.729.2
LLaVA-NeXT-Video-7B [150]48.514.047.824.243.542.434.030.635.6
LLaVA-OneVision-7B [60]47.720.247.412.342.535.229.424.432.4
InternVL3-8B68.139.048.433.648.336.427.335.442.1
InternVL3-38B71.750.246.141.753.538.628.960.748.9
LLaVA-NeXT-Video-72B [150]48.922.857.435.342.436.735.048.640.9
LLaVA-OneVision-72B [60]43.523.957.637.542.539.932.544.640.2
InternVL3-78B71.253.744.439.555.939.528.954.548.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.254, + 0.333, + 0.742, + 0.348 + ], + "angle": 0, + "content": "Table 10: Performance of InternVL3 and other models on VSI-Bench." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.375, + 0.862, + 0.42 + ], + "angle": 0, + "content": "ScreenSpot [22], InternVL3-72B achieves \\(88.7\\%\\) accuracy, slightly outperforming UI-TARS-72B [100] \\((88.4\\%)\\) and Qwen2.5-VL-72B \\((87.1\\%)\\), while Aguvis-72B [132] leads with \\(89.2\\%\\). Notably, InternVL3-38B \\((85.6\\%)\\) surpasses GPT-4o \\((18.1\\%)\\) and Gemini 2.0 \\((84.0\\%)\\) by a significant margin." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.424, + 0.862, + 0.537 + ], + "angle": 0, + "content": "For the more challenging ScreenSpot-V2 [130] benchmark, InternVL3 exhibits strong scaling behavior: InternVL3-72B achieves \\(90.9\\%\\), outperforming UI-TARS-72B \\((90.3\\%)\\). The 8B variant \\((81.4\\%)\\) already surpasses UI-TARS-72B, while the 38B model \\((88.3\\%)\\) further closes the gap to the 72B version. These results highlight InternVL3's robustness in GUI understanding tasks, particularly in handling complex screen layouts and dynamic interfaces. The performance improvements with model scale suggest that larger architectures better capture the fine-grained visual-textual alignments required for precise GUI grounding. The superior performance of the InternVL3 models highlights their robustness in interpreting complex visual layouts. Future work will explore extending these capabilities to more dynamic and interactive GUI environments." 
+ }, + { + "type": "title", + "bbox": [ + 0.137, + 0.552, + 0.314, + 0.568 + ], + "angle": 0, + "content": "3.12 Spatial Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.578, + 0.862, + 0.678 + ], + "angle": 0, + "content": "Spatial reasoning involves constructing a mental representation of a three-dimensional environment from visual inputs—a capability that is vital for applications such as autonomous driving. Table 10 reports the performance results on the Visual-Spatial Intelligence Benchmark (VSI-Bench) [134], where InternVL3 is compared against other state-of-the-art MLLMs. The results clearly indicate that InternVL3 outperforms its competitors in spatial reasoning tasks. In particular, the InternVL3-8B variant achieves a score of 42.1, leading all open-source MLLMs in the benchmark. Moreover, the InternVL3-38B and InternVL3-78B variants score 48.9 and 48.4, respectively—both superior to proprietary models such as GPT-4o, Gemini-1.5 Flash, and Gemini-1.5 Pro." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.681, + 0.862, + 0.753 + ], + "angle": 0, + "content": "Furthermore, InternVL3 exhibits exceptional performance in several sub-category tasks within the benchmark. It attains a score of 71.2 in object counting, 53.7 in absolute distance estimation, 55.9 in relative distance estimation, and 54.5 in appearance order prediction, demonstrating its robust spatial reasoning capabilities. These promising results underscore the potential of InternVL3 for advancing 3D scene understanding, and future work will explore its integration into various downstream applications." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.768, + 0.434, + 0.784 + ], + "angle": 0, + "content": "3.13 Evaluation on Language Capability" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.794, + 0.862, + 0.867 + ], + "angle": 0, + "content": "Table 11 presents the performance evaluation of language capabilities across a diverse array of benchmarks. 
These benchmarks cover comprehensive assessments in general knowledge, linguistic understanding, reasoning, mathematics, and coding tasks, such as MMLU [46], CMMLU [63], C-Eval [48], GAOKAO-Bench [149], TriviaQA [52], NaturalQuestions [58, 110], RACE [59], WinoGrande [103], HellaSwag [142], BigBench Hard [112], GSM8K-Test [25], MATH [47], TheoremQA [17], HumanEval [14], MBPP [4], and MBPP-CN [4]." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.87, + 0.862, + 0.914 + ], + "angle": 0, + "content": "In particular, the experiments conducted compare the performance of Qwen2.5 chat models against corresponding InternVL3 variants. Both model series share the same pre-trained Qwen2.5 base model as their initialization. After undergoing native multimodal pre-training followed by additional post-training, the In" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.176, + 0.086, + 0.819, + 0.43 + ], + "angle": 0, + "content": "
DatasetVersionQwen2.5-0.5B ChatQwen2.5-1.5B ChatQwen2.5-7B ChatQwen2.5-14B ChatQwen2.5-32B ChatQwen2.5-72B Chat
InterVL3-1BInterVL3-2BInterVL3-8BInterVL3-14BInterVL3-38BInterVL3-78B
MMLU4d595a46.449.861.864.874.277.379.582.183.385.484.486.9
CMMLUc1336547.256.762.972.278.884.482.685.885.888.787.489.9
C-Eval2daf2453.559.066.273.377.884.581.485.686.589.288.189.5
GAOKAO4c31db30.946.653.767.781.389.586.991.290.893.591.093.1
TriviaQA2121ce24.221.539.841.255.851.565.167.465.870.174.074.7
NaturalQuestions3dceal8.28.515.215.917.928.219.731.419.731.023.839.0
C38c358f35.266.381.284.790.895.192.196.392.397.496.197.6
RACE-High69ee4f51.568.876.084.686.890.889.693.091.594.291.794.2
WinoGrandeb3677047.252.956.561.971.578.179.184.383.886.783.987.8
HellaSwage4271039.347.062.073.885.490.290.593.092.195.592.795.6
BBH5b92b021.534.539.752.065.777.473.082.585.587.785.485.2
GSM8K1d7fe439.047.261.672.580.183.182.488.484.789.788.290.5
MATH39342427.832.749.357.372.672.273.776.381.172.281.478.9
TheoremQA6f0af812.312.914.415.620.125.518.524.121.918.922.930.4
HumanEval8e312c27.439.051.862.882.378.181.178.189.087.887.282.3
MBPPa447ff38.547.551.460.774.369.376.775.183.777.486.876.7
MBPP-CN9114d519.630.634.445.864.464.475.467.277.875.476.076.0
Overall-33.542.451.659.269.472.973.476.677.478.978.980.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.139, + 0.437, + 0.861, + 0.507 + ], + "angle": 0, + "content": "Table 11: Comparison of language model performance across multiple benchmarks. These results were obtained using the OpenCompass toolkit. We compare InternVL3 with Qwen2.5 Chat models, whose corresponding pre-trained base models are employed as the initialization of the language component in InternVL3. Please note that the evaluation scores of the Qwen2.5 series may differ from those officially reported, as we have adopted the prompt versions provided in the table across all datasets for OpenCompass evaluation." + }, + { + "type": "table", + "bbox": [ + 0.139, + 0.546, + 0.86, + 0.666 + ], + "angle": 0, + "content": "
V2PEδTextVQA valVizWiz valChartQA test avgDocVQA valAI2D testInfoVQA valGQA testSQA-I testPOPETiny LVLMMMMU valSEED v1 imageOverall
X-78.461.781.489.481.169.460.894.487.9348.552.675.675.2
1/25678.061.781.288.581.067.761.094.488.3345.352.975.975.0
1/6478.362.081.789.481.369.660.994.788.3345.752.376.175.3
1/1678.762.181.790.481.670.461.195.088.2345.053.376.175.6
1/479.062.282.491.081.871.761.294.988.1345.852.676.275.9
1/178.761.782.290.281.771.461.294.688.5347.252.476.175.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.139, + 0.672, + 0.86, + 0.714 + ], + "angle": 0, + "content": "Table 12: Performance of the pre-trained InternVL3-8B model on multimodal benchmarks with different positional encoding strategies. When employing V2PE, the impact of different positional increment values \\(\\delta\\) is systematically evaluated." + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.767, + 0.859, + 0.794 + ], + "angle": 0, + "content": "ternVL3 series consistently demonstrates superior performance over the Qwen2.5 chat models across most evaluation benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.801, + 0.861, + 0.911 + ], + "angle": 0, + "content": "This observed enhancement in language capabilities primarily arises from several factors, including the integration of approximately \\(25\\%\\) pure-language data, joint parameter optimization during native multimodal pre-training, and the extensive use of high-quality textual corpora during the subsequent post-training stage. Such an approach not only strengthens multimodal comprehension but also significantly enhances language proficiency. Consequently, even when derived from identical pre-trained base models, the integrated multimodal and pure-text training strategy employed by InternVL3 results in substantially improved performance in language capabilities compared to the specialized training pipeline designed for pure-text tasks used by the Qwen2.5 chat models." + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.142, + 0.093, + 0.856, + 0.361 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.136, + 0.371, + 0.861, + 0.403 + ], + "angle": 0, + "content": "Figure 3: Performance comparison on multimodal benchmarks under different training strategies. 
Native multimodal pre-training endows MLLMs with strong multimodal capabilities, even without further post-training." + }, + { + "type": "table", + "bbox": [ + 0.142, + 0.416, + 0.859, + 0.613 + ], + "angle": 0, + "content": "
ModelMPOMMMUMathVistaMathVisionMathVerseDynaMathWeMathLogicVistaOverall
InternVL3-1B43.447.213.818.14.214.731.124.6
43.445.818.818.75.813.429.825.1 (+0.5)
InternVL3-2B49.159.022.023.213.418.130.030.7
48.657.021.725.314.622.436.932.4 (+1.7)
InternVL3-8B61.967.424.736.922.832.743.241.4
62.771.629.339.825.537.144.144.3 (+2.9)
InternVL3-9B59.068.828.932.223.032.546.541.6
57.771.527.635.326.733.849.243.1 (+1.5)
InternVL3-14B67.170.531.238.827.938.149.946.2
67.175.137.244.431.343.051.249.9 (+3.7)
InternVL3-38B69.371.234.245.122.241.754.448.3
70.175.134.248.235.348.658.452.8 (+4.5)
InternVL3-78B72.274.035.244.231.742.553.550.5
72.279.043.151.035.146.155.954.6 (+4.1)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.145, + 0.619, + 0.846, + 0.634 + ], + "angle": 0, + "content": "Table 13: Comparison of reasoning abilities before and after Mixed Preference Optimization (MPO)." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.664, + 0.292, + 0.679 + ], + "angle": 0, + "content": "3.14 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.69, + 0.862, + 0.789 + ], + "angle": 0, + "content": "The Effectiveness of Native Multimodal Pre-Training. To assess the effectiveness of native multimodal pre-training, we conduct experiments on the InternVL2-8B model while keeping its architecture, initialization parameters, and training data entirely unchanged. Traditionally, InternVL2-8B employs a training pipeline that begins with an MLP warmup phase for multimodal alignment, followed by an instruction-tuning stage. In our experiments, we substitute the conventional MLP warmup phase with our native multimodal pre-training process. This modification isolates the contribution of native multimodal pre-training to the overall multimodal capability of the model." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.794, + 0.862, + 0.866 + ], + "angle": 0, + "content": "The evaluation results in Figure 3 show that the model with native multimodal pre-training exhibits performance on most benchmarks that is comparable to the fully multi-stage-trained InternVL2-8B baseline. Furthermore, when followed by instruction tuning on higher-quality data, the model demonstrates further performance gains across evaluated multimodal tasks. These findings underscore the efficiency of native multimodal pre-training in imparting powerful multimodal capabilities to MLLMs." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.87, + 0.862, + 0.913 + ], + "angle": 0, + "content": "The Evaluation of Variable Visual Position Encoding. 
To promote the multimodal capabilities in long-context scenarios, InternVL3 employs Variable Visual Position Encoding (V2PE) in its visual embedding. However, in the original V2PE [42], this specialized positional encoding for visual tokens did not yield benefits on" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.135, + 0.092, + 0.865, + 0.135 + ], + "angle": 0, + "content": "multimodal tasks with moderate context lengths. To further explore the efficacy of V2PE in a broader setting, we incorporated it during the native multimodal pre-training stage and evaluated the InternVL3-8B pre-trained model on standard multimodal benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.14, + 0.861, + 0.224 + ], + "angle": 0, + "content": "As reported in Table 12, the introduction of V2PE leads to significant performance gains across most evaluation metrics. In addition, our ablation studies—by varying the positional increment \\(\\delta\\)—reveal that even for tasks primarily involving short contexts, relatively small \\(\\delta\\) values can achieve optimal performance. These findings provide important insights for future efforts aimed at refining position encoding strategies for visual tokens in MLLMs. It is important to note that, to ensure fair comparisons, all results elsewhere in this report maintain a fixed \\(\\delta = 1\\), except for the experimental results presented in Table 12." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.229, + 0.861, + 0.314 + ], + "angle": 0, + "content": "Mixed Preference Optimization. Here, we demonstrate the effectiveness of MPO. As shown in Table 13, models fine-tuned with MPO demonstrate superior reasoning performance across seven multimodal reasoning benchmarks compared to their counterparts without MPO. 
Specifically, InternVL3-78B and InternVL3-38B outperform their counterparts by 4.1 and 4.5 points, respectively. Notably, the training data used for MPO is a subset of that used for SFT, indicating that the performance improvements primarily stem from the training algorithm rather than the training data." + }, + { + "type": "title", + "bbox": [ + 0.137, + 0.335, + 0.266, + 0.35 + ], + "angle": 0, + "content": "4 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.367, + 0.865, + 0.534 + ], + "angle": 0, + "content": "We have introduced InternVL3, a significant advancement in the InternVL series that implements a native multimodal pre-training paradigm. By jointly learning linguistic and multimodal capabilities during the pretraining phase, InternVL3 avoids the training complexities and optimization challenges typically associated with post-hoc MLLM training pipelines. Through the incorporation of variable visual position encoding (V2PE) for extended multimodal contexts, advanced post-training strategies—such as supervised fine-tuning and mixed preference optimization—and test-time scaling, InternVL3 establishes a new open-source benchmark across a wide range of multimodal tasks, while simultaneously preserving robust linguistic competencies. Notably, InternVL3-78B attains a 72.2-point score on the MMMU benchmark, exceeding previous open-source MLLMs and reducing the performance gap relative to leading proprietary counterparts (e.g., Gemini-2.5 Pro). In line with our commitment to fostering community-driven innovation in multimodal large language models, we will publicly release InternVL3's training data and model weights, thereby encouraging further research and development in this rapidly evolving field." 
+ }, + { + "type": "title", + "bbox": [ + 0.138, + 0.553, + 0.233, + 0.568 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.152, + 0.577, + 0.861, + 0.617 + ], + "angle": 0, + "content": "[1] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.152, + 0.628, + 0.861, + 0.655 + ], + "angle": 0, + "content": "[2] Anonymous. CG-bench: Clue-grounded question answering benchmark for long video understanding. In Submitted to The Thirteenth International Conference on Learning Representations, 2024. under review. 14, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.152, + 0.667, + 0.861, + 0.693 + ], + "angle": 0, + "content": "[3] Anthropic. The claude 3 model family: Opus, sonnet, haiku. https://www.anthropic.com, 2024. 2, 8, 9, 10, 11, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.152, + 0.705, + 0.861, + 0.742 + ], + "angle": 0, + "content": "[4] Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.152, + 0.757, + 0.861, + 0.795 + ], + "angle": 0, + "content": "[5] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.152, + 0.808, + 0.861, + 0.835 + ], + "angle": 0, + "content": "[6] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. 
arXiv preprint arXiv:2502.13923, 2025. 11, 12, 13, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.152, + 0.847, + 0.861, + 0.874 + ], + "angle": 0, + "content": "[7] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2.5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 1, 2, 9, 10, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.152, + 0.885, + 0.861, + 0.91 + ], + "angle": 0, + "content": "[8] Loubna Ben Allal, Anton Lozhkov, Guilherme Penedo, Thomas Wolf, and Leandro von Werra. Smoll-m-corpus, 2024. 5" + }, + { + "type": "list", + "bbox": [ + 0.152, + 0.577, + 0.861, + 0.91 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.151, + 0.092, + 0.86, + 0.133 + ], + "angle": 0, + "content": "[9] Ali Furkan Biten, Ruben Tito, Andres Mafla, Lluis Gomez, Marçal Rusinol, Ernest Valveny, CV Jawahar, and Dimosthenis Karatzas. Scene text visual question answering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4291-4301, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.141, + 0.861, + 0.181 + ], + "angle": 0, + "content": "[10] Jie Cao and Jing Xiao. An augmented benchmark dataset for geometric question answering through dual parallel text encoding. In Proceedings of the 29th International Conference on Computational Linguistics, pages 1511-1520, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.19, + 0.86, + 0.218 + ], + "angle": 0, + "content": "[11] Shuaichen Chang, David Palzer, Jialin Li, Eric Fosler-Lussier, and Ningchuan Xiao. Mapqa: A dataset for question answering on choropleth maps. arXiv preprint arXiv:2211.08545, 2022. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.148, + 0.226, + 0.86, + 0.254 + ], + "angle": 0, + "content": "[12] Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal lmm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.262, + 0.862, + 0.302 + ], + "angle": 0, + "content": "[13] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.311, + 0.862, + 0.351 + ], + "angle": 0, + "content": "[14] Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374, 2021. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.36, + 0.862, + 0.401 + ], + "angle": 0, + "content": "[15] Qiaoling Chen, Diandian Gu, Guoteng Wang, Xun Chen, YingTong Xiong, Ting Huang, Qinghao Hu, Xin Jin, Yonggang Wen, Tianwei Zhang, et al. Internevo: Efficient long-sequence large language model training via hybrid parallelism and redundant sharding. arXiv preprint arXiv:2401.09149, 2024. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.148, + 0.409, + 0.861, + 0.438 + ], + "angle": 0, + "content": "[16] Qiguang Chen, Libo Qin, Jin Zhang, Zhi Chen, Xiao Xu, and Wanxiang Che. M3cot: A novel benchmark for multi-domain multi-step multi-modal chain-of-thought. arXiv preprint arXiv:2405.16473, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.446, + 0.862, + 0.498 + ], + "angle": 0, + "content": "[17] Wenhu Chen, Ming Yin, Max Ku, Pan Lu, Yixin Wan, Xueguang Ma, Jianyu Xu, Xinyi Wang, and Tony Xia. 
Theoremqa: A theorem-driven question answering dataset. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, EMNLP 2023, Singapore, December 6-10, 2023, pages 7889-7901. Association for Computational Linguistics, 2023. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.507, + 0.862, + 0.547 + ], + "angle": 0, + "content": "[18] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271, 2024. 1, 2, 3, 5, 6, 9, 10, 11, 12, 13, 14, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.556, + 0.862, + 0.596 + ], + "angle": 0, + "content": "[19] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 1, 3, 10, 11, 12, 13, 14, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.605, + 0.862, + 0.644 + ], + "angle": 0, + "content": "[20] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.653, + 0.862, + 0.705 + ], + "angle": 0, + "content": "[21] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024. 
1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.715, + 0.862, + 0.743 + ], + "angle": 0, + "content": "[22] Kanzhi Cheng, Qiushi Sun, Yougang Chu, Fangzhi Xu, Yantao Li, Jianbing Zhang, and Zhiyong Wu. Seeclick: Harnessing gui grounding for advanced visual gui agents. arXiv preprint arXiv:2401.10935, 2024. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.751, + 0.862, + 0.791 + ], + "angle": 0, + "content": "[23] Zesen Cheng, Sicong Leng, Hang Zhang, Yifei Xin, Xin Li, Guanzheng Chen, Yongxin Zhu, Wenqi Zhang, Ziyang Luo, Deli Zhao, et al. Videollama 2: Advancing spatial-temporal modeling and audio understanding in video-llms. arXiv preprint arXiv:2406.07476, 2024. 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.8, + 0.862, + 0.828 + ], + "angle": 0, + "content": "[24] Christopher Clark and Matt Gardner. Simple and effective multi-paragraph reading comprehension. In Proceedings of the Annual Meeting of the Association for Computational Linguistics, pages 845–855, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.836, + 0.862, + 0.876 + ], + "angle": 0, + "content": "[25] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.885, + 0.862, + 0.913 + ], + "angle": 0, + "content": "[26] OpenCompass Contributors. Opencompass: A universal evaluation platform for foundation models. https://github.com/open-compass/opencompass, 2023. 
9, 10, 11, 12" + }, + { + "type": "list", + "bbox": [ + 0.147, + 0.092, + 0.862, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.092, + 0.865, + 0.121 + ], + "angle": 0, + "content": "[27] X.AI Corp. Grok-1.5 vision preview: Connecting the digital and physical worlds with our first multimodal model. https://x.ai/blog/grok-1.5v, 2024.11" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.128, + 0.864, + 0.169 + ], + "angle": 0, + "content": "[28] Wenliang Dai, Nayeon Lee, Boxin Wang, Zhuolin Yang, Zihan Liu, Jon Barker, Tuomas Rintamaki, Moham-mad Shoeybi, Bryan Catanzaro, and Wei Ping. NvIm: Open frontier-class multimodal llms. arXiv preprint arXiv:2409.11402, 2024. 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.177, + 0.865, + 0.207 + ], + "angle": 0, + "content": "[29] Google Deepmind. Gemini 2.0 is now available to everyone. https://blog.google/technology/google-deepmind/gemini-model-updates-february-2025/, 202.9" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.214, + 0.862, + 0.242 + ], + "angle": 0, + "content": "[30] Google Deepmind. Introducing gemini 2.0: our new ai model for the agentic era. https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/, 2024.9" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.25, + 0.862, + 0.29 + ], + "angle": 0, + "content": "[31] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, et al. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024. 
1, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.299, + 0.862, + 0.34 + ], + "angle": 0, + "content": "[32] Xiaoyi Dong, Pan Zhang, Yuhang Zang, Yuhang Cao, Bin Wang, Linke Ouyang, Songyang Zhang, Haodong Duan, Wenwei Zhang, Yining Li, et al. Internlm-xcomposer2-4khd: A pioneering large vision-language model handling resolutions from 336 pixels to 4k hd. arXiv preprint arXiv:2404.06512, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.347, + 0.862, + 0.389 + ], + "angle": 0, + "content": "[33] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multi-modality models. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 11198-11201, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.396, + 0.862, + 0.437 + ], + "angle": 0, + "content": "[34] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.445, + 0.862, + 0.474 + ], + "angle": 0, + "content": "[35] Xinyu Fang, Kangrui Mao, Haodong Duan, Xiangyu Zhao, Yining Li, Dahua Lin, and Kai Chen. Mmbench-video: A long-form multi-shot benchmark for holistic video understanding. arXiv preprint arXiv:2406.14515, 2024. 14, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.482, + 0.862, + 0.522 + ], + "angle": 0, + "content": "[36] Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In Conference on Computer Vision and Pattern Recognition Workshop, pages 178-178, 2004. 
7" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.53, + 0.862, + 0.571 + ], + "angle": 0, + "content": "[37] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Zhenyu Qiu, Wei Lin, Jinrui Yang, Xiawu Zheng, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.579, + 0.862, + 0.62 + ], + "angle": 0, + "content": "[38] Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 14, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.628, + 0.862, + 0.668 + ], + "angle": 0, + "content": "[39] Xingyu Fu, Yushi Hu, Bangzheng Li, Yu Feng, Haoyu Wang, Xudong Lin, Dan Roth, Noah A Smith, Wei-Chiu Ma, and Ranjay Krishna. Blink: Multimodal large language models can see but not perceive. arXiv preprint arXiv:2404.12390, 2024. 9, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.677, + 0.862, + 0.717 + ], + "angle": 0, + "content": "[40] Jiahui Gao, Renjie Pi, Jipeng Zhang, Jiacheng Ye, Wanjun Zhong, Yufei Wang, Lanqing Hong, Jianhua Han, Hang Xu, Zhenguo Li, et al. G-llava: Solving geometric problem with multi-modal large language model. arXiv preprint arXiv:2312.11370, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.726, + 0.862, + 0.767 + ], + "angle": 0, + "content": "[41] Zhangwei Gao, Zhe Chen, Erfei Cui, Yiming Ren, Weiyun Wang, Jinguo Zhu, Hao Tian, Shenglong Ye, Junjun He, Xizhou Zhu, et al. Mini-internvl: A flexible-transfer pocket multimodal model with \\(5\\%\\) parameters and \\(90\\%\\) performance. arXiv preprint arXiv:2410.16261, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.775, + 0.862, + 0.815 + ], + "angle": 0, + "content": "[42] Junqi Ge, Ziyi Chen, Jintao Lin, Jinguo Zhu, Xihui Liu, Jifeng Dai, and Xizhou Zhu. V2pe: Improving multi-modal long-context capability of vision-language models with variable visual position encoding. arXiv preprint arXiv:2412.09616, 2024. 2, 3, 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.823, + 0.862, + 0.864 + ], + "angle": 0, + "content": "[43] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6904-6913, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.873, + 0.862, + 0.913 + ], + "angle": 0, + "content": "[44] Shuhao Gu, Jialing Zhang, Siyuan Zhou, Kevin Yu, Zhaohu Xing, Liangdong Wang, Zhou Cao, Jintao Jia, Zhuoyi Zhang, Yixuan Wang, et al. Infinity-mm: Scaling multimodal performance with large-scale and high-quality instruction data. arXiv preprint arXiv:2410.18558, 2024. 9, 10" + }, + { + "type": "list", + "bbox": [ + 0.146, + 0.092, + 0.865, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.092, + 0.86, + 0.133 + ], + "angle": 0, + "content": "[45] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: An advanced diagnostic suite for entangled language hallucination & visual illusion in large vision-language models. arXiv preprint arXiv:2310.14566, 2023. 
8, 12, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.139, + 0.862, + 0.178 + ], + "angle": 0, + "content": "[46] Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. Measuring massive multitask language understanding. In The International Conference on Learning Representations, 2020. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.187, + 0.862, + 0.239 + ], + "angle": 0, + "content": "[47] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. In Joaquin Vanschoeren and Sai-Kit Yeung, editors, Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual, 2021. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.248, + 0.862, + 0.287 + ], + "angle": 0, + "content": "[48] Yuzhen Huang, Yuzhuo Bai, Zhihao Zhu, Junlei Zhang, Jinghan Zhang, Tangjun Su, Junteng Liu, Chuancheng Lv, Yikai Zhang, Yao Fu, et al. C-eval: A multi-level multi-discipline chinese evaluation suite for foundation models. Advances in Neural Information Processing Systems, 36, 2024. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.295, + 0.862, + 0.334 + ], + "angle": 0, + "content": "[49] Zheng Huang, Kai Chen, Jianhua He, Xiang Bai, Dimosthenis Karatzas, Shijian Lu, and CV Jawahar. Icdar2019 competition on scanned receiptOCR and information extraction. In 2019 International Conference on Document Analysis and Recognition (ICDAR), pages 1516-1520. IEEE, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.342, + 0.862, + 0.381 + ], + "angle": 0, + "content": "[50] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6700–6709, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.39, + 0.862, + 0.418 + ], + "angle": 0, + "content": "[51] Dongfu Jiang, Xuan He, Huaye Zeng, Cong Wei, Max Ku, Qian Liu, and Wenhu Chen. Mantis: Interleaved multi-image instruction tuning. arXiv preprint arXiv:2405.01483, 2024. 9, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.425, + 0.862, + 0.453 + ], + "angle": 0, + "content": "[52] Mandar Joshi, Eunsol Choi, Daniel S Weld, and Luke Zettlemoyer. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. arXiv preprint arXiv:1705.03551, 2017. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.46, + 0.862, + 0.487 + ], + "angle": 0, + "content": "[53] Seungjae Jung, Gunsoo Han, Daniel Wontae Nam, and Kyoung-Woon On. Binary classifier optimization for large language model alignment. arXiv preprint arXiv:2404.04656, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.495, + 0.862, + 0.534 + ], + "angle": 0, + "content": "[54] Kushal Kafle, Brian Price, Scott Cohen, and Christopher Kanan. Dvqa: Understanding data visualizations via question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5648-5656, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.542, + 0.862, + 0.57 + ], + "angle": 0, + "content": "[55] Mehran Kazemi, Hamidreza Alvari, Ankit Anand, Jialin Wu, Xi Chen, and Radu Soricut. Geomverse: A systematic evaluation of large models for geometric reasoning. arXiv preprint arXiv:2312.12241, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.578, + 0.862, + 0.617 + ], + "angle": 0, + "content": "[56] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. 
In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing, pages 787-798, 2014. 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.625, + 0.862, + 0.653 + ], + "angle": 0, + "content": "[57] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In European Conference on Computer Vision, pages 235-251, 2016. 6, 7, 8, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.66, + 0.862, + 0.699 + ], + "angle": 0, + "content": "[58] Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. Natural questions: a benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453-466, 2019. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.708, + 0.862, + 0.735 + ], + "angle": 0, + "content": "[59] Guokun Lai, Qizhe Xie, Hanxiao Liu, Yiming Yang, and Eduard Hovy. Race: Large-scale reading comprehension dataset from examinations. arXiv preprint arXiv:1704.04683, 2017. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.743, + 0.862, + 0.781 + ], + "angle": 0, + "content": "[60] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 9, 10, 11, 12, 15, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.79, + 0.862, + 0.828 + ], + "angle": 0, + "content": "[61] Bohao Li, Yuying Ge, Yi Chen, Yixiao Ge, Ruimao Zhang, and Ying Shan. Seed-bench-2-plus: Benchmarking multimodal large language models with text-rich visual comprehension. 
arXiv preprint arXiv:2404.16790, 2024.8, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.838, + 0.862, + 0.877 + ], + "angle": 0, + "content": "[62] Chunyi Li, Jianbo Zhang, Zicheng Zhang, Haoning Wu, Yuan Tian, Wei Sun, Guo Lu, Xiaohong Liu, Xiongkuo Min, Weisi Lin, et al. R-bench: Are your large multimodal model robust to real-world corruptions? arXiv preprint arXiv:2410.05474, 2024. 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.885, + 0.862, + 0.912 + ], + "angle": 0, + "content": "[63] Haonan Li, Yixuan Zhang, Fajri Koto, Yifei Yang, Hai Zhao, Yeyun Gong, Nan Duan, and Timothy Baldwin. Cmflu: Measuring massive multitask language understanding in chinese. arXiv preprint arXiv:2306.09212, 2023. 16" + }, + { + "type": "list", + "bbox": [ + 0.146, + 0.092, + 0.862, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.092, + 0.865, + 0.121 + ], + "angle": 0, + "content": "[64] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023. 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.125, + 0.864, + 0.167 + ], + "angle": 0, + "content": "[65] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024. 14, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.173, + 0.865, + 0.214 + ], + "angle": 0, + "content": "[66] Yanghao Li, Chao-Yuan Wu, Haoqi Fan, Karttikeya Mangalam, Bo Xiong, Jitendra Malik, and Christoph Feichtenhofer. Mviv2: Improved multiscale vision transformers for classification and detection. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4804-4814, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.219, + 0.862, + 0.261 + ], + "angle": 0, + "content": "[67] Yifan Li, Yifan Du, Kun Zhou, Jinping Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. In The Conference on Empirical Methods in Natural Language Processing, pages 292–305, 2023. 12, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.267, + 0.862, + 0.307 + ], + "angle": 0, + "content": "[68] Zhang Li, Biao Yang, Qiang Liu, Zhiyin Ma, Shuo Zhang, Jingxu Yang, Yabo Sun, Yuliang Liu, and Xiang Bai. Monkey: Image resolution and text label are important things for large multi-modal models. arXiv preprint arXiv:2311.06607, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.313, + 0.862, + 0.355 + ], + "angle": 0, + "content": "[69] Zhiqi Li, Guo Chen, Shilong Liu, Shihao Wang, Vibashan VS, Yishen Ji, Shiyi Lan, Hao Zhang, Yilin Zhao, Subhashree Radhakrishnan, et al. Eagle 2: Building post-training data strategies from scratch for frontier vision-language models. arXiv preprint arXiv:2501.14818, 2025. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.36, + 0.862, + 0.402 + ], + "angle": 0, + "content": "[70] Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.407, + 0.862, + 0.449 + ], + "angle": 0, + "content": "[71] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pre-training for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26689-26699, 2024. 
1, 9, 10, 15, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.453, + 0.862, + 0.483 + ], + "angle": 0, + "content": "[72] Adam Dahlgren Lindström and Savitha Sam Abraham. Clevr-math: A dataset for compositional language, visual and mathematical reasoning. arXiv preprint arXiv:2208.05358, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.488, + 0.862, + 0.517 + ], + "angle": 0, + "content": "[73] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in Neural Information Processing Systems, 36, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.522, + 0.862, + 0.564 + ], + "angle": 0, + "content": "[74] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Qing Jiang, Chunyuan Li, Jianwei Yang, Hang Su, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. In European Conference on Computer Vision, pages 38-55. Springer, 2025. 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.569, + 0.862, + 0.609 + ], + "angle": 0, + "content": "[75] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? arXiv preprint arXiv:2307.06281, 2023. 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.616, + 0.862, + 0.657 + ], + "angle": 0, + "content": "[76] Yuliang Liu, Zhang Li, Hongliang Li, Wenwen Yu, Mingxin Huang, Dezhi Peng, Mingyu Liu, Mingrui Chen, Chunyuan Li, Lianwen Jin, et al. On the hidden mystery ofOCR in large multimodal models. arXiv preprint arXiv:2305.07895, 2023. 8, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.663, + 0.862, + 0.692 + ], + "angle": 0, + "content": "[77] Zihan Liu, Yang Chen, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. Acemath: Advancing frontier math reasoning with post-training and reward modeling. arXiv preprint, 2024. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.697, + 0.862, + 0.726 + ], + "angle": 0, + "content": "[78] Zuyan Liu, Yuhao Dong, Ziwei Liu, Winston Hu, Jiwen Lu, and Yongming Rao. Oryx mllm: On-demand spatial-temporal understanding at arbitrary resolution. arXiv preprint arXiv:2409.12961, 2024. 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.731, + 0.862, + 0.772 + ], + "angle": 0, + "content": "[79] Dakuan Lu, Xiaoyu Tan, Rui Xu, Tianchu Yao, Chao Qu, Wei Chu, Yinghui Xu, and Yuan Qi. Scp-116k: A high-quality problem-solution dataset and a generalized pipeline for automated extraction in the higher education science domain, 2025. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.778, + 0.862, + 0.82 + ], + "angle": 0, + "content": "[80] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. 7, 8, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.825, + 0.862, + 0.865 + ], + "angle": 0, + "content": "[81] Pan Lu, Ran Gong, Shibiao Jiang, Liang Qiu, Siyuan Huang, Xiaodan Liang, and Song-Chun Zhu. Inter-gps: Interpretable geometry problem solving with formal language and symbolic reasoning. arXiv preprint arXiv:2105.04165, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.872, + 0.862, + 0.913 + ], + "angle": 0, + "content": "[82] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. Advances in Neural Information Processing Systems, 35:2507-2521, 2022. 
6" + }, + { + "type": "list", + "bbox": [ + 0.146, + 0.092, + 0.865, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.092, + 0.86, + 0.131 + ], + "angle": 0, + "content": "[83] Pan Lu, Liang Qiu, Jiaqi Chen, Tony Xia, Yizhou Zhao, Wei Zhang, Zhou Yu, Xiaodan Liang, and Song-Chun Zhu. Iconqa: A new benchmark for abstract diagram understanding and visual language reasoning. arXiv preprint arXiv:2110.13214, 2021.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.139, + 0.86, + 0.167 + ], + "angle": 0, + "content": "[84] Shiyin Lu, Yang Li, Qing-Guo Chen, Zhao Xu, Weihua Luo, Kaifu Zhang, and Han-Jia Ye. Ovis: Structural embedding alignment for multimodal large language model. arXiv preprint arXiv:2405.20797, 2024. 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.174, + 0.862, + 0.214 + ], + "angle": 0, + "content": "[85] Xudong Lu, Yinghao Chen, Cheng Chen, Hui Tan, Boheng Chen, Yina Xie, Rui Hu, Guanxin Tan, Renshou Wu, Yan Hu, et al. Bluelm-v-3b: Algorithm and system co-design for multimodal large language models on mobile devices. arXiv preprint arXiv:2411.10640, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.148, + 0.223, + 0.86, + 0.25 + ], + "angle": 0, + "content": "[86] Yujie Lu, Dongfu Jiang, Wenhu Chen, William Yang Wang, Yejin Choi, and Bill Yuchen Lin. Wildvision: Evaluating vision-language models in the wild with human preferences. arXiv preprint arXiv:2406.11069, 2024. 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.258, + 0.862, + 0.297 + ], + "angle": 0, + "content": "[87] Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592, 2, 2024. 
7" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.305, + 0.861, + 0.344 + ], + "angle": 0, + "content": "[88] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11–20, 2016. 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.353, + 0.861, + 0.391 + ], + "angle": 0, + "content": "[89] Andrés Marafioti, Orr Zohar, Miquel Farré, Merve Noyan, Elie Bakouch, Pedro Cuenca, Cyril Zakka, Loubna Ben Allal, Anton Lozhkov, Nouamane Tazi, et al. Smolvlm: Redefining small and efficient multimodal models. arXiv preprint arXiv:2504.05299, 2025. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.148, + 0.401, + 0.861, + 0.439 + ], + "angle": 0, + "content": "[90] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3195-3204, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.448, + 0.861, + 0.487 + ], + "angle": 0, + "content": "[91] Ahmed Masry, Xuan Long Do, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. In Proceedings of the Annual Meeting of the Association for Computational Linguistics, pages 2263-2279, 2022. 6, 7, 8, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.148, + 0.496, + 0.862, + 0.533 + ], + "angle": 0, + "content": "[92] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and CV Jawahar. Infographicvqa. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1697-1706, 2022. 
6, 7, 8, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.543, + 0.862, + 0.581 + ], + "angle": 0, + "content": "[93] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2200–2209, 2021. 7, 8, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.148, + 0.591, + 0.862, + 0.617 + ], + "angle": 0, + "content": "[94] Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. arXiv preprint arXiv:2407.00215, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.626, + 0.861, + 0.663 + ], + "angle": 0, + "content": "[95] Fanqing Meng, Jin Wang, Chuanhao Li, Quanfeng Lu, Hao Tian, Jiaqi Liao, Xizhou Zhu, Jifeng Dai, Yu Qiao, Ping Luo, et al. Mmiu: Multimodal multi-image understanding for evaluating large vision-language models. arXiv preprint arXiv:2408.02718, 2024. 9, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.673, + 0.862, + 0.71 + ], + "angle": 0, + "content": "[96] Anand Mishra, Shashank Shekhar, Ajeet Kumar Singh, and Anirban Chakraborty. Ocr-vqa: Visual question answering by reading text in images. In International Conference on Document Analysis and Recognition, pages 947-952, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.721, + 0.862, + 0.746 + ], + "angle": 0, + "content": "[97] OpenAI. Gpt-4v(ison) system card. https://cdn.openai.com/papers/GPTV_System/Card.pdf, 2023.1,8,9,10,11,12,14,15,16" + }, + { + "type": "ref_text", + "bbox": [ + 0.148, + 0.756, + 0.819, + 0.77 + ], + "angle": 0, + "content": "[98] OpenAI. Gpt-4o system card. 
https://openai.com/index/gpt-4o-system-card/, 2025.2,8" + }, + { + "type": "ref_text", + "bbox": [ + 0.148, + 0.778, + 0.862, + 0.817 + ], + "angle": 0, + "content": "[99] Runqi Qiao, Qiuna Tan, Guanting Dong, Minhui Wu, Chong Sun, Xiaoshuai Song, Zhuoma GongQue, Shanglin Lei, Zhe Wei, Miaoxuan Zhang, et al. We-math: Does your large multimodal model achieve human-like mathematical reasoning? arXiv preprint arXiv:2407.01284, 2024. 8, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.14, + 0.826, + 0.862, + 0.863 + ], + "angle": 0, + "content": "[100] Yujia Qin, Yining Ye, Junjie Fang, Haoming Wang, Shihao Liang, Shizuo Tian, Junda Zhang, Jiahao Li, Yunxin Li, Shijue Huang, et al. Ui-tars: Pioneering automated gui interaction with native agents. arXiv preprint arXiv:2501.12326, 2025. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.14, + 0.873, + 0.862, + 0.911 + ], + "angle": 0, + "content": "[101] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36, 2024. 6" + }, + { + "type": "list", + "bbox": [ + 0.14, + 0.092, + 0.862, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.139, + 0.092, + 0.86, + 0.133 + ], + "angle": 0, + "content": "[102] Machel Reid, Nikolay Savinov, Denis Teplyashin, Dmitry Lepikhin, Timothy Lillicrap, Jean-baptiste Alayrac, Radu Soricut, Angeliki Lazaridou, Orhan Firat, Julian Schrittwieser, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024. 
10, 11, 12, 15, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.139, + 0.861, + 0.178 + ], + "angle": 0, + "content": "[103] Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. Winogrande: An adversarial winograd schema challenge at scale. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 8732-8740, 2020. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.186, + 0.862, + 0.226 + ], + "angle": 0, + "content": "[104] Minjoon Seo, Hannaneh Hajishirzi, Ali Farhadi, Oren Etzioni, and Clint Malcolm. Solving geometry problems: Combining text and diagram interpretation. In Proceedings of the 2015 conference on empirical methods in natural language processing, pages 1466-1476, 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.233, + 0.861, + 0.274 + ], + "angle": 0, + "content": "[105] Min Shi, Fuxiao Liu, Shihao Wang, Shijia Liao, Subhashree Radhakrishnan, De-An Huang, Hongxu Yin, Karan Sapra, Yaser Yacoob, Humphrey Shi, et al. Eagle: Exploring the design space for multimodal llms with mixture of encoders. arXiv preprint arXiv:2408.15998, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.281, + 0.862, + 0.32 + ], + "angle": 0, + "content": "[106] Wenhao Shi, Zhiqiang Hu, Yi Bin, Junhua Liu, Yang Yang, See-Kiong Ng, Lidong Bing, and Roy Ka-Wei Lee. Math-llava: Bootstrapping mathematical reasoning for multimodal large language models. arXiv preprint arXiv:2406.17294, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.328, + 0.862, + 0.368 + ], + "angle": 0, + "content": "[107] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8317-8326, 2019. 
6, 8, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.375, + 0.861, + 0.402 + ], + "angle": 0, + "content": "[108] Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.409, + 0.861, + 0.437 + ], + "angle": 0, + "content": "[109] Hai-Long Sun, Da-Wei Zhou, Yang Li, Shiyin Lu, Chao Yi, Qing-Guo Chen, Zhao Xu, Weihua Luo, Kaifu Zhang, De-Chuan Zhan, et al. Parrot: Multilingual visual instruction tuning. arXiv preprint arXiv:2406.02539, 2024. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.444, + 0.86, + 0.471 + ], + "angle": 0, + "content": "[110] Kai Sun, Dian Yu, Dong Yu, and Claire Cardie. Investigating prior knowledge for challenging Chinese machine reading comprehension. Transactions of the Association for Computational Linguistics, 8:141-155, 2020. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.478, + 0.862, + 0.517 + ], + "angle": 0, + "content": "[111] Zhiqing Sun, Sheng Shen, Shengcao Cao, Haotian Liu, Chunyuan Li, Yikang Shen, Chuang Gan, Liang-Yan Gui, Yu-Xiong Wang, Yiming Yang, et al. Aligning large multimodal models with factually augmented rlhf. arXiv preprint arXiv:2309.14525, 2023. 12, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.525, + 0.861, + 0.564 + ], + "angle": 0, + "content": "[112] Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261, 2022. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.572, + 0.862, + 0.611 + ], + "angle": 0, + "content": "[113] Jingqun Tang, Qi Liu, Yongjie Ye, Jinghui Lu, Shu Wei, Chunhui Lin, Wanqing Li, Mohamad Fitri Faiz Bin Mahmood, Hao Feng, Zhen Zhao, et al. 
Mtvqa: Benchmarking multilingual text-centric visual question answering. arXiv preprint arXiv:2405.11985, 2024. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.619, + 0.862, + 0.658 + ], + "angle": 0, + "content": "[114] Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.666, + 0.588, + 0.681 + ], + "angle": 0, + "content": "[115] Qwen Team. Qvq: To see the world with wisdom, December 2024. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.688, + 0.862, + 0.728 + ], + "angle": 0, + "content": "[116] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024. 9, 10, 11, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.735, + 0.86, + 0.75 + ], + "angle": 0, + "content": "[117] v DeepMind. Gemini 2.5 pro. https://deepmind.google/technologies/gemini/pro/, 2025. 1, 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.757, + 0.86, + 0.797 + ], + "angle": 0, + "content": "[118] Fei Wang, Xingyu Fu, James Y Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou, Kai Zhang, et al. Muirbench: A comprehensive benchmark for robust multi-image understanding. arXiv preprint arXiv:2406.09411, 2024. 9, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.804, + 0.86, + 0.831 + ], + "angle": 0, + "content": "[119] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. arXiv preprint arXiv:2402.14804, 2024. 
8, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.839, + 0.86, + 0.866 + ], + "angle": 0, + "content": "[120] Peiyi Wang, Lei Li, Zhihong Shao, RX Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce lms step-by-step without human annotations. arXiv preprint arXiv:2312.08935, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.873, + 0.86, + 0.912 + ], + "angle": 0, + "content": "[121] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 1, 8, 10, 11, 12, 13, 14, 15" + }, + { + "type": "list", + "bbox": [ + 0.137, + 0.092, + 0.862, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.139, + 0.092, + 0.862, + 0.12 + ], + "angle": 0, + "content": "[122] Peng Wang, Shijie Wang, Junyang Lin, Shuai Bai, Xiaohuan Zhou, Jingren Zhou, Xinggang Wang, and Chang Zhou. One-peace: Exploring one general representation model toward unlimited modalities. arXiv:2305.11172, 2023. 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.128, + 0.861, + 0.157 + ], + "angle": 0, + "content": "[123] Weihan Wang, Qingsong Lv, Wenmeng Yu, Wenyi Hong, Ji Qi, Yan Wang, Junhui Ji, Zhuoyi Yang, Lei Zhao, Xixuan Song, et al. Cogvlm: Visual expert for pretrained language models. arXiv preprint arXiv:2311.03079, 2023. 1, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.165, + 0.862, + 0.205 + ], + "angle": 0, + "content": "[124] Weiyun Wang, Zhe Chen, Wenhai Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Jinguo Zhu, Xizhou Zhu, Lewei Lu, Yu Qiao, and Jifeng Dai. Enhancing the reasoning ability of multimodal large language models via mixed preference optimization. 
arXiv preprint arXiv:2411.10442, 2024. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.214, + 0.862, + 0.254 + ], + "angle": 0, + "content": "[125] Weiyun Wang, Zhangwei Gao, Lianjie Chen, Zhe Chen, Jinguo Zhu, Xiangyu Zhao, Yangzhou Liu, Yue Cao, Shenglong Ye, Xizhou Zhu, et al. Visualprm: An effective process reward model for multimodal reasoning. arXiv preprint arXiv:2503.10291, 2025. 2, 7, 8, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.263, + 0.862, + 0.303 + ], + "angle": 0, + "content": "[126] Weiyun Wang, Yiming Ren, Haowen Luo, Tiantong Li, Chenxiang Yan, Zhe Chen, Wenhai Wang, Qingyun Li, Lewei Lu, Xizhou Zhu, et al. The all-seeing project v2: Towards general relation comprehension of the open world. arXiv preprint arXiv:2402.19474, 2024. 6, 12, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.312, + 0.862, + 0.352 + ], + "angle": 0, + "content": "[127] Weiyun Wang, Min Shi, Qingyun Li, Wenhai Wang, Zhenhang Huang, Linjie Xing, Zhe Chen, Hao Li, Xizhou Zhu, Zhiguo Cao, et al. The all-seeing project: Towards panoptic visual recognition and understanding of the open world. In The International Conference on Learning Representations, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.361, + 0.862, + 0.401 + ], + "angle": 0, + "content": "[128] Zirui Wang, Mengzhou Xia, Luxi He, Howard Chen, Yitao Liu, Richard Zhu, Kaiqu Liang, Xindi Wu, Haotian Liu, Sadhika Malladi, et al. Charxiv: Charting gaps in realistic chart understanding in multimodal llms. arXiv preprint arXiv:2406.18521, 2024. 8, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.41, + 0.862, + 0.438 + ], + "angle": 0, + "content": "[129] Haoning Wu, Dongxu Li, Bei Chen, and Junnan Li. Longvideobench: A benchmark for long-context interleaved video-language understanding. arXiv preprint arXiv:2407.15754, 2024. 
8, 14, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.446, + 0.862, + 0.485 + ], + "angle": 0, + "content": "[130] Zhiyong Wu, Zhenyu Wu, Fangzhi Xu, Yian Wang, Qiushi Sun, Chengyou Jia, Kanzhi Cheng, Zichen Ding, Liheng Chen, Paul Pu Liang, et al. Os-atlas: A foundation action model for generalist gui agents. arXiv preprint arXiv:2410.23218, 2024. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.495, + 0.862, + 0.523 + ], + "angle": 0, + "content": "[131] Yijia Xiao, Edward Sun, Tianyu Liu, and Wei Wang. Logicvista: Multimodal llm logical reasoning benchmark in visual contexts. arXiv preprint arXiv:2407.04973, 2024. 8, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.531, + 0.862, + 0.56 + ], + "angle": 0, + "content": "[132] Yiheng Xu, Zekun Wang, Junli Wang, Dunjie Lu, Tianbao Xie, Amrita Saha, Doyen Sahoo, Tao Yu, and Caiming Xiong. Aguvis: Unified pure vision agents for autonomous gui interaction. 2024. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.568, + 0.862, + 0.607 + ], + "angle": 0, + "content": "[133] B. Yan, Yi Jiang, Jiannan Wu, D. Wang, Ping Luo, Zehuan Yuan, and Hutchuan Lu. Universal instance perception as object discovery and retrieval. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.617, + 0.862, + 0.645 + ], + "angle": 0, + "content": "[134] Jihan Yang, Shusheng Yang, Anjali Gupta, Rilyn Han, Li Fei-Fei, and Saining Xie. Thinking in Space: How Multimodal Large Language Models See, Remember and Recall Spaces. arXiv preprint arXiv:2412.14171, 2024. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.653, + 0.862, + 0.682 + ], + "angle": 0, + "content": "[135] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 
9, 10, 11, 12, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.689, + 0.862, + 0.73 + ], + "angle": 0, + "content": "[136] Qinghao Ye, Haiyang Xu, Jiabo Ye, Ming Yan, Haowei Liu, Qi Qian, Ji Zhang, Fei Huang, and Jingren Zhou. mplug-owl2: Revolutionizing multi-modal large language model with modality collaboration. arXiv preprint arXiv:2311.04257, 2023. 1, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.738, + 0.862, + 0.79 + ], + "angle": 0, + "content": "[137] Kaining Ying, Fanqing Meng, Jin Wang, Zhiqian Li, Han Lin, Yue Yang, Hao Zhang, Wenbo Zhang, Yuqi Lin, Shuo Liu, Jiayi Lei, Quanfeng Lu, Runjian Chen, Peng Xu, Renrui Zhang, Haozhe Zhang, Peng Gao, Yali Wang, Yu Qiao, Ping Luo, Kaipeng Zhang, and Wenqi Shao. Mmt-bench: A comprehensive multimodal benchmark for evaluating large vision-language models towards multitask agi. arXiv preprint arXiv:2404.16006, 2024. 9, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.8, + 0.862, + 0.828 + ], + "angle": 0, + "content": "[138] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.836, + 0.862, + 0.876 + ], + "angle": 0, + "content": "[139] Weihao Yu, Zhengyuan Yang, Linfeng Ren, Linjie Li, Jianfeng Wang, Kevin Lin, Chung-Ching Lin, Zicheng Liu, Lijuan Wang, and Xinchao Wang. Mm-vet2: A challenging benchmark to evaluate large multimodal models for integrated capabilities. arXiv preprint arXiv:2408.00765, 2024. 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.885, + 0.862, + 0.913 + ], + "angle": 0, + "content": "[140] Ya-Qi Yu, Minghui Liao, Jiwen Zhang, and Jihao Wu. Texthawk2: A large vision-language model excels in bilingualOCR and grounding with 16x fewer tokens. arXiv preprint arXiv:2410.05261, 2024. 
13" + }, + { + "type": "list", + "bbox": [ + 0.137, + 0.092, + 0.862, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.139, + 0.092, + 0.86, + 0.133 + ], + "angle": 0, + "content": "[141] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. arXiv preprint arXiv:2311.16502, 2023. 2, 7, 8, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.139, + 0.861, + 0.18 + ], + "angle": 0, + "content": "[142] Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. Hellaswag: Can a machine really finish your sentence? In Proceedings of the Annual Meeting of the Association for Computational Linguistics, pages 4791-4800, 2019. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.187, + 0.862, + 0.228 + ], + "angle": 0, + "content": "[143] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Dufter, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1.5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.235, + 0.862, + 0.276 + ], + "angle": 0, + "content": "[144] Haotian Zhang, Haoxuan You, Philipp Dufter, Bowen Zhang, Chen Chen, Hong-You Chen, Tsu-Jui Fu, William Yang Wang, Shih-Fu Chang, Zhe Gan, et al. Ferret-v2: An improved baseline for referring and grounding with large language models. arXiv preprint arXiv:2404.07973, 2024. 
13" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.283, + 0.862, + 0.323 + ], + "angle": 0, + "content": "[145] Peiyuan Zhang, Kaichen Zhang, Bo Li, Guangtao Zeng, Jingkang Yang, Yuanhan Zhang, Ziyue Wang, Haoran Tan, Chunyuan Li, and Ziwei Liu. Long context transfer from language to vision. arXiv preprint arXiv:2406.16852, 2024. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.33, + 0.862, + 0.372 + ], + "angle": 0, + "content": "[146] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Peng Gao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? arXiv preprint arXiv:2403.14624, 2024.8, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.379, + 0.862, + 0.418 + ], + "angle": 0, + "content": "[147] Renrui Zhang, Xinyu Wei, Dongzhi Jiang, Yichi Zhang, Ziyu Guo, Chengzhuo Tong, Jiaming Liu, Aojun Zhou, Bin Wei, Shanghang Zhang, et al. Mavis: Mathematical visual instruction tuning. arXiv preprint arXiv:2407.08739, 2024.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.426, + 0.862, + 0.455 + ], + "angle": 0, + "content": "[148] Tianyu Zhang, Suyuchen Wang, Lu Li, Ge Zhang, Perouz Taslakian, Sai Rajeswar, Jie Fu, Bang Liu, and Yoshua Bengio. Vcr: Visual caption restoration. arXiv preprint arXiv:2406.06462, 2024. 8, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.461, + 0.862, + 0.49 + ], + "angle": 0, + "content": "[149] Xiaotian Zhang, Chunyang Li, Yi Zong, Zhengyu Ying, Liang He, and Xipeng Qiu. Evaluating the performance of large language models on gaokao benchmark. arXiv preprint arXiv:2305.12474, 2023. 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.497, + 0.862, + 0.525 + ], + "angle": 0, + "content": "[150] Y Zhang, B Li, H Liu, Y Lee, L Gui, D Fu, J Feng, Z Liu, and C Li. Llava next: A strong zero-shot video understanding model. 2024. 
16" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.532, + 0.862, + 0.573 + ], + "angle": 0, + "content": "[151] Yi-Fan Zhang, Huanyu Zhang, Haochen Tian, Chaoyou Fu, Shuangqing Zhang, Junfei Wu, Feng Li, Kun Wang, Qingsong Wen, Zhang Zhang, et al. Mme-realworld: Could your multimodal llm challenge high-resolution real-world scenarios that are difficult for humans? arXiv preprint arXiv:2408.13257, 2024. 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.58, + 0.862, + 0.62 + ], + "angle": 0, + "content": "[152] Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning. arXiv preprint arXiv:2501.07301, 2025. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.628, + 0.862, + 0.668 + ], + "angle": 0, + "content": "[153] Bingchen Zhao, Yongshuo Zong, Letian Zhang, and Timothy Hospedales. Benchmarking multi-image understanding in vision and language models: Perception, knowledge, reasoning, and multi-hop reasoning. arXiv preprint arXiv:2406.12742, 2024. 9, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.676, + 0.862, + 0.716 + ], + "angle": 0, + "content": "[154] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 14, 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.724, + 0.862, + 0.763 + ], + "angle": 0, + "content": "[155] Chengke Zou, Xingang Guo, Rui Yang, Junyu Zhang, Bin Hu, and Huan Zhang. Dynamath: A dynamic visual benchmark for evaluating mathematical reasoning robustness of vision language models. arXiv preprint arXiv:2411.00836, 2024. 
8, 9" + }, + { + "type": "list", + "bbox": [ + 0.137, + 0.092, + 0.862, + 0.763 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "27" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10479/71273ce6-5170-4939-8354-af535b974810_origin.pdf b/data/2025/2504_10xxx/2504.10479/71273ce6-5170-4939-8354-af535b974810_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..484dfdd2ff6cb85e3c8189065ac4dea0aaf1406a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/71273ce6-5170-4939-8354-af535b974810_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90526cbc7819eaabf491b7daaa4f76af60d3e83e700f66138aff7697ec641abf +size 898773 diff --git a/data/2025/2504_10xxx/2504.10479/full.md b/data/2025/2504_10xxx/2504.10479/full.md new file mode 100644 index 0000000000000000000000000000000000000000..f4dd38a1e40ed233b66656f19cc4b3e1ced27937 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/full.md @@ -0,0 +1,521 @@ +# InternVL3: Exploring Advanced Training and Test-Time Recipes for Open-Source Multimodal Models + +Jinguo Zhu $^{1*}$ , Weiyun Wang $^{5,1*†}$ , Zhe Chen $^{4,1*†}$ , Zhaoyang Liu $^{1*†}$ , Shenglong Ye $^{1*}$ , Lixin Gu $^{1*}$ , Hao Tian $^{2*}$ , Yuchen Duan $^{6,1*†}$ , Weijie Su $^{1}$ , Jie Shao $^{4,1†}$ , Zhangwei Gao $^{7,1†}$ , Erfei Cui $^{7,1†}$ , Xuehui Wang $^{7,1†}$ , Yue Cao $^{4,1†}$ , Yangzhou Liu $^{4,1†}$ , Xingguang Wei $^{1†}$ , Hongjie Zhang $^{1}$ , Haomin Wang $^{7,1†}$ , Weiye Xu $^{1†}$ , Hao Li $^{1†}$ , Jiahao Wang $^{1†}$ , Nianchen Deng $^{1}$ , Songze Li $^{1}$ , Yinan He $^{1}$ , Tan Jiang $^{2}$ , Jiapeng Luo $^{2}$ , Yi Wang $^{1}$ , Conghui He $^{1}$ , Botian Shi $^{1}$ , Xingcheng Zhang $^{1}$ , Wenqi Shao $^{1}$ , Junjun He $^{1}$ , Yingtong Xiong $^{1}$ , Wenwen Qu $^{1}$ , Peng Sun $^{1}$ , Penglong Jiao $^{1}$ , Han Lv $^{1}$ , Lijun Wu $^{1}$ , 
Kaipeng Zhang $^{1}$ , Huipeng Deng $^{1}$ , Jiaye Ge $^{1}$ , Kai Chen $^{1}$ , Limin Wang $^{4,1}$ , Min Dou $^{1}$ , Lewei Lu $^{2}$ , Xizhou Zhu $^{3,1}$ , Tong Lu $^{4}$ , Dahua Lin $^{6,1}$ , Yu Qiao $^{1}$ , Jifeng Dai $^{3,1‡}$ , Wenhai Wang $^{6,1‡}$ + +$^{1}$ Shanghai AI Laboratory $^{2}$ SenseTime Research $^{3}$ Tsinghua University $^{4}$ Nanjing University $^{5}$ Fudan University $^{6}$ The Chinese University of Hong Kong $^{7}$ Shanghai Jiao Tong University + +Code: https://github.com/OpenGVLab/InternVL + +Model: https://huggingface.co/OpenGVLab/InternVL3-78B + +Data: https://huggingface.co/datasets/OpenGVLab/InternVL-Data + +# Abstract + +We introduce InternVL3, a significant advancement in the InternVL series featuring a native multimodal pre-training paradigm. Rather than adapting a text-only large language model (LLM) into a multimodal large language model (MLLM) that supports visual inputs, InternVL3 jointly acquires multimodal and linguistic capabilities from both diverse multimodal data and pure-text corpora during a single pre-training stage. This unified training paradigm effectively addresses the complexities and alignment challenges commonly encountered in conventional post-hoc training pipelines for MLLMs. To further improve performance and scalability, InternVL3 incorporates variable visual position encoding (V2PE) to support extended multimodal contexts, employs advanced post-training techniques such as supervised fine-tuning (SFT) and mixed preference optimization (MPO), and adopts test-time scaling strategies alongside an optimized training infrastructure. Extensive empirical evaluations demonstrate that InternVL3 delivers superior performance across a wide range of multi-modal tasks. In particular, InternVL3-78B achieves a score of 72.2 on the MMMU benchmark, setting a new state-of-the-art among open-source MLLMs. 
Its capabilities remain highly competitive with leading proprietary models, including ChatGPT-4o, Claude 3.5 Sonnet, and Gemini 2.5 Pro, while also maintaining strong pure-language proficiency. In pursuit of open-science principles, we will publicly release both the training data and model weights to foster further research and development in next-generation MLLMs. + +# 1 Introduction + +Multimodal large language models (MLLMs) [32, 66, 121, 21, 19, 123, 68, 114, 97, 136, 71, 31, 85, 117, 18, 89, 105, 69] have recently achieved or even surpassed human-level performance in a broad spectrum of tasks, underscoring their potential as a significant stride toward artificial general intelligence (AGI). Yet, the majority of leading MLLMs—both open-source and proprietary—are adapted from text-only large language models through sophisticated multi-stage pipelines [21, 19, 18, 5, 121, 7]. These “post-hoc” approaches are built upon the + +
| | InternVL2.5 78B | InternVL3 8B | InternVL3 78B | Qwen2.5-VL 72B | Other Open-Source MLLMs | Claude-3.5 Sonnet | ChatGPT-4o-latest | Gemini-2.5 Pro |
|---|---|---|---|---|---|---|---|---|
| Model Weights | ✓ | ✓ | ✓ | ✓ | ✓ | ✗ | ✗ | ✗ |
| Training Data | ✗ | ✓ | ✓ | ✗ | – | ✗ | ✗ | ✗ |
| MMMU (Multi-discipline) | 70.1% | 65.6% | 72.2% (2.1 ↑) | 70.2% | 64.5% | 66.4% | 72.9% | 74.7% |
| MathVista (Math) | 72.3% | 75.2% | 79.6% (7.3 ↑) | 74.8% | 70.5% | 65.1% | 71.6% | 80.9% |
| AI2D (Diagrams) | 89.1% | 85.2% | 89.7% (0.6 ↑) | 88.7% | 88.1% | 81.2% | 86.3% | 89.5% |
| ChartQA (Charts) | 88.3% | 86.6% | 89.7% (1.4 ↑) | 89.5% | 88.3% | 90.8% | – | – |
| DocVQA (Documents) | 95.1% | 92.7% | 95.4% (0.3 ↑) | 96.4% | 96.5% | 95.2% | – | – |
| InfographicVQA (Infographics) | 84.1% | 76.8% | 85.2% (1.1 ↑) | 87.3% | 84.7% | 74.3% | – | – |
| HallusionBench (Hallucination) | 57.4% | 49.9% | 59.1% (1.7 ↑) | 55.2% | 58.1% | 55.5% | 57.0% | 64.1% |
| OCRBench (OCR) | 854 | 880 | 906 (52 ↑) | 885 | 877 | – | 894 | 862 |
| LongVideoBench (Video) | 63.6% | 58.8% | 65.7% (2.1 ↑) | 60.7% | 61.3% | – | – | – |
+ +Figure 1: Multimodal performance of the InternVL series and other advanced MLLMs. The InternVL series has consistently exhibited progressive enhancements in multimodal capabilities. The newly released InternVL3 significantly outperforms existing open-source MLLMs. Moreover, even in comparison with state-of-the-art closed-source commercial models, InternVL3 continues to demonstrate highly competitive performance. + +original text-based pre-training processes, thereby introducing alignment challenges when integrating additional modalities such as vision. In practice, bridging modality gaps often necessitates incorporating auxiliary data from specialized domains (e.g., optical character recognition scenarios) and intricate parameter-freezing or multi-stage fine-tuning schedules to ensure that core linguistic capacities remain uncompromised [73, 7, 5, 18]. Such resource-intensive strategies highlight the need for more efficient multimodal training paradigms. + +In this report, we introduce InternVL3, the latest milestone in the InternVL series [21, 20, 18], which is distinguished by its native multimodal pre-training strategy. Rather than first pre-training a text-only large language model and subsequently retrofitting it via multimodal alignment to support visual processing, InternVL3 learns multimodal capabilities from the pre-training stage by jointly exposed to both text-only corpora and diverse multimodal datasets. This unified approach enables the model to simultaneously acquire linguistic and multimodal competencies in a more efficient and integrated manner. + +InternVL3 further excels through multiple innovations that reinforce both performance and scalability. We employ a variable visual position encoding (V2PE) mechanism [42] to accommodate longer multimodal contexts. 
Furthermore, advanced post-training strategies—comprising supervised fine-tuning (SFT) and mixed preference optimization (MPO) [124]—together with test-time scaling strategies [125] and an optimized training infrastructure [15], significantly enhance InternVL3's efficiency and performance. + +Comprehensive empirical evaluations demonstrate that InternVL3 surpasses its predecessors (e.g., InternVL2.5 [18]) across a wide range of tasks, including multi-discipline reasoning, document understanding, multi-image / video understanding, real-world comprehension, multimodal hallucination detection, visual grounding, and multilingual capabilities. Notably, by incorporating expanded domain-specific datasets, InternVL3 also exhibits marked improvements in tool usage, GUI agents, industrial image analysis, and spatial reasoning, thus substantially extending the multimodal scenarios addressed by the InternVL series. It proves highly competitive with other open-source MLLMs such as Qwen2.5-VL [7] and remains on par with closed-source models (e.g., ChatGPT-4o [98], Claude-3.5 Sonnet [3], Gemini-2.5 Pro [117]). This versatility is evidenced by its 72.2-point performance on the MMMU benchmark [141], setting a new standard among open-source MLLMs. Additionally, InternVL3 demonstrates language capabilities comparable to other advanced LLMs of similar scale. + +![](images/2543074654573cacd7d214bda38adddfa3eca6683b87e7e4c38e54f1a78f3548.jpg) +Figure 2: Performance of various MLLMs on the OpenCompass multimodal academic leaderboard. The enhanced InternVL series—InternVL3—demonstrates outstanding multimodal capabilities, significantly outperforming both the Qwen2.5-VL series and closed-source models such as Step-1o, GLM-4v-Plus, and GPT-4o. Remarkably, InternVL3-78B also remains highly competitive with the state-of-the-art Gemini-2.5-Pro. 
+ +To foster further advancements within the open-source community, we will release the training data1 and model weights alongside this work, thereby ensuring transparency and reproducibility for the continued development of next-generation MLLMs. + +# 2 InternVL3 + +Building upon the prior InternVL series [21, 19, 18], we propose InternVL3, a new generation within the InternVL model family. InternVL3 is specifically designed to streamline the training pipeline while significantly enhancing multimodal capabilities. In this section, we first delineate the core components of InternVL3, including its model architecture, training procedures, test-time scaling strategies, and infrastructure-level optimizations. + +# 2.1 Model Architecture + +The architecture of InternVL3 follows the same general framework as its predecessors, adhering to the "ViTMLP-LLM" paradigm [66, 18, 41, 20]. Detailed architectural specifications are summarized in Table 1. + +Although the native pre-training paradigm discussed later could enable training MLLMs from scratch, we choose to initialize the ViT and LLM components with pre-trained model weights to reduce computational costs. The vision encoder is available in two configurations: InternViT-300M and InternViT-6B. For the language model, we leverage pre-trained large language models (LLMs), specifically the Qwen2.5 series and InternLM3-8B. Importantly, our LLM components are initialized solely from pre-trained base models, without employing instruction-tuned variants. The multilayer perceptron (MLP) utilized in the model is a two-layer network with random initialization. In line with the approach taken in InternVL2.5, InternVL3 incorporates a pixel unshuffle operation to enhance scalability for processing high-resolution images. This operation reduces the visual token count to one-quarter of its original value, representing each $448 \times 448$ image tile with 256 visual tokens. + +Variable Visual Position Encoding. 
InternVL3 also integrates the Variable Visual Position Encoding (V2PE) [42], which utilizes smaller, more flexible position increments for visual tokens. This modifica + +
| Model Name | #Param | Vision Encoder | Language Model | OpenCompass Academic |
|---|---|---|---|---|
| InternVL3-1B | 0.9B | InternViT-300M-448px-V2.5 | Qwen2.5-0.5B | 57.4 |
| InternVL3-2B | 1.9B | InternViT-300M-448px-V2.5 | Qwen2.5-1.5B | 63.9 |
| InternVL3-8B | 8.1B | InternViT-300M-448px-V2.5 | Qwen2.5-7B | 73.3 |
| InternVL3-9B | 9.2B | InternViT-300M-448px-V2.5 | InternLM3-8B | 72.4 |
| InternVL3-14B | 15.1B | InternViT-300M-448px-V2.5 | Qwen2.5-14B | 75.5 |
| InternVL3-38B | 38.4B | InternViT-6B-448px-V2.5 | Qwen2.5-32B | 77.3 |
| InternVL3-78B | 78.4B | InternViT-6B-448px-V2.5 | Qwen2.5-72B | 79.5 |
+ +Table 1: Pre-trained models used in the InternVL3 series. The OpenCompass scores for the InternVL3 series were obtained through our local testing. + +tion facilitates the handling of longer multimodal contexts without excessively extending the position window. Specifically, each training sample for the MLLM is represented as: + +$$ +\mathbf {x} = \left(x _ {1}, x _ {2}, \dots , x _ {L}\right), \tag {1} +$$ + +where each token $x_{i}$ can be a textual token embedding, a visual embedding, or another modality-specific representation (e.g., video patch embeddings). The position index $p_{i}$ for any token $x_{i}$ can be computed sequentially as follows: + +$$ +p _ {i} = \left\{ \begin{array}{l l} 0, & \text {i f} i = 1, \\ f _ {\text {p o s}} \left(p _ {i - 1}, x _ {i}\right), & \text {f o r} i = 2, 3, \dots , N. \end{array} \right. \tag {2} +$$ + +In contrast to traditional MLLMs, where position indices increment uniformly by 1 for each token, irrespective of modality, V2PE employs a modality-specific recursive function for position index computation. This results in distinct position index assignments for textual and visual tokens: + +$$ +p _ {i} = p _ {i - 1} + \left\{ \begin{array}{l l} 1, & \text {i f} x _ {i} \text {i s a t e x t u a l t o k e n ,} \\ \delta , & \text {i f} x _ {i} \text {i s a v i s u a l t o k e n ,} \end{array} \right. \tag {3} +$$ + +where $\delta$ is a smaller increment ( $\delta < 1$ ), reducing the rate at which position indices increase for visual tokens. The standard increment of 1 is retained for textual tokens to preserve their positional distinctions. In line with the original V2PE design, we maintain that $\delta$ remains constant within a single image to preserve the relative positional relationships. 
During training, $\delta$ is randomly chosen for each image from a predefined set of fractional values: + +$$ +\delta \in \Delta = \left\{1, \frac {1}{2}, \frac {1}{4}, \frac {1}{8}, \frac {1}{1 6}, \frac {1}{3 2}, \frac {1}{6 4}, \frac {1}{1 2 8}, \frac {1}{2 5 6} \right\}. \tag {4} +$$ + +During inference, $\delta$ can be flexibly selected based on the input sequence length, enabling a balance between task performance and ensuring that position indices remain within the model's valid context range. Notably, when $\delta = 1$ , V2PE reverts to the conventional positional encoding used in InternVL2.5. + +# 2.2 Native Multimodal Pre-Training + +We propose a native multimodal pre-training approach that consolidates language pre-training and multi-modal alignment training into a single pre-training stage. Unlike conventional paradigms—where a language-only large model is first trained (typically with language pre-training followed by language post-training) and subsequently adapted to accommodate additional modalities—our method performs integrated optimization by interleaving multimodal data (e.g., image-text, video-text, or interleaved image-text sequences) with large-scale textual corpora during the pre-training process. This unified training scheme enables the pre-trained model to learn both linguistic and multimodal capabilities simultaneously, ultimately enhancing its capability to handle vision-language tasks without introducing additional bridging modules or subsequent inter-model alignment procedures. + +Multimodal Autoregressive Formulation. Let $\mathcal{M}$ denote a Transformer-based model parameterized by $\theta$ that can process text, image, and video simultaneously. 
Specifically, for an arbitrary training sample $\mathbf{x} = (x_{1}, x_{2}, \ldots, x_{L})$ with the token length of $L$ , we adopt the standard left-to-right autoregressive objective: + +$$ +\mathcal {L} _ {\text {f u l l}} (\theta) = - \sum_ {i = 2} ^ {L} w _ {i} \cdot \log p _ {\theta} \left(x _ {i} \mid x _ {1}, \dots , x _ {i - 1}\right), \tag {5} +$$ + +where $w_{i}$ denotes the loss weight of token $i$ . Although this formulation naturally propagates gradients through tokens of all modalities, we restrict the loss computation exclusively to text tokens, resulting in: + +$$ +\mathcal {L} _ {\text {t e x t - o n l y}} (\theta) = - \sum_ {\substack {i = 2 \\ x _ {i} \in \text {T e x t}}} ^ {L} w _ {i} \cdot \log p _ {\theta} \left(x _ {i} \mid x _ {1}, \dots , x _ {i - 1}\right). \tag{6} +$$ + +Under this selective objective, visual tokens serve as conditioning context for text prediction and are not directly predicted. Consequently, the model learns to embed multimodal information in a manner that is beneficial for downstream language decoding tasks. Notably, regarding the design choice of the token weight $w_{i}$ , as discussed in InternVL2.5 [18], the widely used token averaging and sample averaging strategies can lead to gradients biased toward longer and shorter responses, respectively. To mitigate this issue, we adopt square averaging, which is defined as: + +$$ +w _ {i} = \left\{ \begin{array}{l l} \frac {1}{l ^ {0}}, & \text {f o r t o k e n a v e r a g i n g} \\ \frac {1}{l ^ {0 . 5}}, & \text {f o r s q u a r e a v e r a g i n g} \\ \frac {1}{l ^ {1}}, & \text {f o r s a m p l e a v e r a g i n g}, \end{array} \right. \tag {7} +$$ + +where $l$ denotes the number of tokens in the training sample on which the loss needs to be calculated. + +Joint Parameter Optimization. Unlike the conventional "language-only training followed by multimodal adaptation" paradigm, our method updates all model parameters jointly during multimodal pre-training. 
Specifically, let + +$$ +\theta^ {*} = \underset {\theta} {\arg \min } \mathbb {E} _ {\mathbf {x} \in \mathcal {D} _ {\text {m u l t i}}} \left[ \mathcal {L} _ {\text {t e x t - o n l y}} (\theta) \right], \tag {8} +$$ + +where $\mathcal{D}_{\mathrm{multi}}$ is the union of large-scale text-only and multimodal corpora (e.g., image-text or video-text pairs). We thus optimize a single model to handle these combined data sources. This multi-task joint optimization ensures that text representations and visual features are learned in concert, reinforcing alignment across modalities. + +Moreover, this integrated optimization departs from conventional "language-only training followed by multimodal adaptation" pipelines, which often freeze or partially fine-tune certain layers in the LLM component or even in the ViT encoder when adapting to MLLM. In contrast, our method trains every layer jointly, allowing all parameters to be jointly optimized on large-scale multimodal corpora and ensuring that both linguistic and visual features evolve synchronously. As a result, the final parameters are primed for high performance on both pure language and multimodal tasks, without additional tuning steps. + +Data. The pre-training data utilized in InternVL3 is broadly classified into two categories: multimodal data and pure language data. The multimodal dataset comprises a synthesis of pre-existing datasets alongside newly acquired real-world data. Specifically, we leverage the pre-training corpus from InternVL2.5, which covers a diverse range of domains such as image captioning, general question answering, mathematics, charts, optical character recognition (OCR), knowledge grounding, document understanding, multi-turn dialogue, and medical data. Although the overall data scale was not increased, the utility of this dataset was significantly improved by updating not only to the MLP module weights but also to those associated with the ViT and LLM components. 
In addition, to enhance the model's ability to generalize in real-world applications, additional data is incorporated from tasks related to graphical user interfaces (GUI), tool usage, 3D scene understanding, and video comprehension. + +To compensate for the relatively short and less diverse textual content typically found in multimodal datasets, we integrate pure language data into the pre-training process. This helps preserve and amplify the model's capabilities in language understanding and generation. The language corpus is primarily constructed on the pre-training data from InternLM2.5 and is further augmented with various open-source text datasets [8, 77, 79]. This enhancement aims to improve the model's performance on knowledge-intensive tasks, as well as its proficiency in mathematical and reasoning tasks. + +Given the complexity of balancing these heterogeneous data sources, determining an appropriate sampling strategy is non-trivial. In InternVL3, we adopt a two-stage strategy to establish the optimal sampling ratio between multimodal and language data. Initially, we train separate models on the multimodal and language datasets and evaluate their performance on corresponding benchmarks, allowing us to identify optimal sampling ratios within each modality. Then, under a fixed total training budget, we combine the two modalities and determine their relative sampling ratio. Empirical studies show that a 1:3 ratio of language to multimodal data + +yields the best overall performance across both unimodal and multimodal benchmarks. Under this configuration, the total number of training tokens is approximately 200 billion, comprising 50 billion from language data and 150 billion from multimodal data. + +# 2.3 Post-Training + +After the Native Multimodal Pre-Training, we apply a two-stage post-training strategy to further enhance the multimodal conversation and reasoning abilities of our models. 
This strategy consists of Supervised Fine-Tuning (SFT) and Mixed Preference Optimization (MPO). In the SFT phase, the model is trained to imitate the high-quality responses under positive supervision signals. In the subsequent MPO phase, we introduce additional supervision from both positive and negative samples, thereby further improving its overall abilities. + +Supervised Fine-Tuning. In this phase, the techniques of random JPEG compression, square loss re-weighting, and multimodal data packing proposed in InternVL2.5 [18] are also employed in the InternVL3 series. The main advancement of the SFT phase in InternVL3 compared to InternVL2.5 lies in the use of higher-quality and more diverse training data. Specifically, we further extend training samples for tool usage, 3D scene understanding, GUI operations, long context tasks, video understanding, scientific diagrams, creative writing, and multimodal reasoning. + +Mixed Preference Optimization. During Pre-training and SFT, the model is trained to predict the next token conditioned on previous ground-truth tokens. However, during inference, the model predicts each token based on its own prior outputs. This discrepancy between ground-truth tokens and model-predicted tokens introduces a distribution shift, which can impair the model's Chain-of-Thought (CoT) reasoning capabilities. To mitigate this issue, we employ Mixed Preference Optimization (MPO) [124], which introduces additional supervision from both positive and negative samples to align the model response distribution with the ground-truth distribution, thereby improving reasoning performance. 
Specifically, the training objective of MPO is a combination of preference loss $\mathcal{L}_p$ , quality loss $\mathcal{L}_q$ , and generation loss $\mathcal{L}_g$ , which can be formulated as follows: + +$$ +\mathcal {L} = w _ {p} \mathcal {L} _ {p} + w _ {q} \mathcal {L} _ {q} + w _ {g} \mathcal {L} _ {g}, \tag {9} +$$ + +where $w_{*}$ represents the weight assigned to each loss component. Specifically, the DPO loss [101] serves as the preference loss to enable the model to learn the relative preference between chosen and rejected responses: + +$$ +\mathcal {L} _ {p} = - \log \sigma \left(\beta \log \frac {\pi_ {\theta} \left(y _ {c} \mid x\right)}{\pi_ {0} \left(y _ {c} \mid x\right)} - \beta \log \frac {\pi_ {\theta} \left(y _ {r} \mid x\right)}{\pi_ {0} \left(y _ {r} \mid x\right)}\right), \tag {10} +$$ + +where $\beta$ is the KL penalty coefficient, and $x$ , $y_{c}$ , and $y_{r}$ are user query, chosen response, and rejected response, respectively. The policy model $\pi_{\theta}$ is initialized from model $\pi_0$ . After that, the BCO loss [53] is employed as the quality loss, which helps the model to understand the absolute quality of individual responses: + +$$ +\mathcal {L} _ {q} = \mathcal {L} _ {q} ^ {+} + \mathcal {L} _ {q} ^ {-}, \tag {11} +$$ + +where $\mathcal{L}_q^+$ and $\mathcal{L}_q^-$ represent the loss for chosen and rejected responses, respectively. They are calculated independently, requiring the model to differentiate the absolute quality of individual responses. 
The loss terms are given by: + +$$ +\mathcal {L} _ {q} ^ {+} = - \log \sigma \left(\beta \log \frac {\pi_ {\theta} \left(y _ {c} \mid x\right)}{\pi_ {0} \left(y _ {c} \mid x\right)} - \delta\right), \tag {12} +$$ + +$$ +\mathcal {L} _ {q} ^ {-} = - \log \sigma \left(- \left(\beta \log \frac {\pi_ {\theta} \left(y _ {r} \mid x\right)}{\pi_ {0} \left(y _ {r} \mid x\right)} - \delta\right)\right), \tag {13} +$$ + +where $\delta$ represents the reward shift, calculated as the moving average of previous rewards to stabilize training. Finally, the LM loss is used as the generation loss to help the model learn the generation process of preferred responses. The loss function is defined in Equation 6. + +Data. For SFT data, we construct the training corpora based on those used in InternVL2.5 [18] while introducing additional tool usage, 3D scene understanding, GUI operations, scientific diagrams, creative writing, and multimodal reasoning samples. As a result, the number of training samples grows from 16.3M in InternVL2.5 to 21.7M in InternVL3. For MPO data, we construct preference pairs based on the data pipeline and samples proposed in MMPR v1.2 [124], which cover a wide range of domains, including general visual question answering (VQA) [43, 50, 90, 83, 127, 126], science [57, 16, 82], chart [91, 54, 11], mathematics [72, 104, 10, 81, 55, 40, 147, 106], OCR [92, 107, 9, 49, 96], and document [24]. We use the SFT versions of InternVL3-8B, 38B, and 78B to generate rollouts. During the MPO phase, all models are trained on the same dataset, which comprises about 300K samples. + +# 2.4 Test-Time Scaling + +Test-Time Scaling has been shown to be an effective method to enhance the reasoning abilities of LLMs and MLLMs [108, 94, 87, 70, 120, 36, 152, 125]. In this work, we use the Best-of-N evaluation strategy and employ VisualPRM-8B [125] as the critic model to select the best response for reasoning and mathematics evaluation. + +Visual Process Reward Model. 
VisualPRM first assigns a quality score to each step of the given solution and then averages these scores to obtain the overall score for this solution. This process is formulated as a multi-turn chat task so that we can effectively leverage the generation ability of MLLMs. The image $I$ , question $q$ , and the first step $s_0$ of the step-by-step solution $s = \{s_0, s_1, \dots, s_n\} \in S$ to this question are included in the first turn and a new step is presented in each subsequent turn. During the training stage, the model is required to predict the correctness of the given step in each turn as follows: + +$$ +c _ {i} \sim M \left(y _ {i} \mid I, q, s _ {\leq i}\right), \tag {14} +$$ + +where $c_{i} \in \{+, -\}$ denotes the correctness of $i$ -th step. During the inference stage, the score for each step is defined as the probability of generating "+" + +Data. VisualPRM400K [125] is used to train VisualPRM, which is constructed based on multimodal questions collected from MMPR v1.2 [124]. Following the data pipeline in VisualPRM400K, we further expand VisualPRM400K by sampling rollouts from the 8B and 38B variants of InternVL3. + +# 2.5 Infrastructure + +To facilitate model training, we extend the InternEVO framework [15]—originally designed to optimize the Zero Redundancy Optimizer (ZeRO) for large-scale LLM training—to support the training of our InternVL models. This extension enables efficient scaling to hundreds of billions of parameters across thousands of GPUs. The enhanced framework introduces flexible and decoupled sharding strategies for the ViT, MLP, and LLM components, significantly improving training efficiency by overlapping communication and computation. It further supports a comprehensive range of parallelism strategies—including data, tensor, sequence, and pipeline parallelism—as well as their arbitrary combinations. 
+ +A key challenge in MLLM training is the imbalance in computational load caused by the varying proportions of visual and textual tokens. Such imbalances can lead to inefficiencies by overburdening either the ViT or LLM modules. To address this, we introduce a suite of techniques that dynamically balance computational workloads across modules, ensuring efficient and equitable resource utilization. + +For InternVL models of varying scales, the extended InternEVO framework formulates an optimization objective that identifies the optimal configuration to minimize both memory consumption and communication overhead across different module dimensions. To support sequences of up to 32K tokens, our approach incorporates both head-parallel and sequence-parallel techniques, effectively overcoming scalability bottlenecks while preserving computational efficiency. Compared to the training of InternVL2.5, the application of InternEVO in InternVL3 results in a training speedup of $50\%$ to $200\%$ for models of comparable size, given the same computational budget. + +# 3 Experiments + +In this section, we first compare the overall multimodal capabilities of InternVL3 with those of current advanced MLLMs using widely adopted multimodal benchmarks. Subsequently, we evaluate the performance of InternVL3 in various domains, including multimodal reasoning, mathematics, optical character recognition (OCR), chart and document understanding, multi-image understanding, real-world comprehension, comprehensive multimodal evaluation, multimodal hallucination evaluation, visual grounding, multimodal multilingual understanding, video understanding, and other multimodal tasks, most of which were tested using VLMEvalKit [33]. Additionally, we provide a detailed evaluation of the language capabilities of InternVL3. 
Finally, we analyze the advantages of several key modifications in InternVL3 compared to its predecessor, InternVL2.5, including the native multimodal pre-training, the V2PE positional encoding, and the improvements brought by the post-training technique. + 

# 3.1 Overall Comparison to Other Advanced MLLMs

Figure 1 provides a detailed assessment of InternVL3's performance across a diverse set of benchmarks, including MMMU [141], MathVista [80], AI2D [57], ChartQA [91], DocVQA [93], InfographicVQA [92], + 

HallusionBench [45], OCRBench [76], and LongVideoBench [129]. Compared with previous models, InternVL3 demonstrates substantial improvements across a wide range of task categories. These advancements can be primarily attributed to enhanced training strategies, refined testing methodologies, and the expanded training corpus. + 

More specifically, InternVL3 achieves an impressive score of 72.2 on the MMMU benchmark, underscoring its superior capacity to manage complex multimodal challenges. Beyond its performance on MMMU, InternVL3 consistently outperforms earlier versions of the InternVL series on a variety of tasks, thereby emphasizing its broad applicability to real-world scenarios that require sophisticated multimodal comprehension and reasoning. + 

In addition to surpassing its open-source counterparts, InternVL3 exhibits competitive performance relative to leading closed-source commercial models, such as ChatGPT-4o-latest [98] and Claude-3.5 Sonnet [3]. In many cases, the performance gap between InternVL3 and these proprietary models is notably narrowed—and in certain benchmarks, such as AI2D and ChartQA, InternVL3 even surpasses them. Nonetheless, our results further reveal that Gemini2.5 Pro [117] maintains a performance edge on select tasks (e.g., on HallusionBench), indicating that despite the notable progress in InternVL3, there remains room for further refinement of our InternVL series. 
+ +# 3.2 Multimodal Reasoning and Mathematics + +To comprehensively evaluate the multimodal reasoning and mathematical capabilities of InternVL3, we conduct experiments on a series of benchmarks, including MMMU [141] for multidisciplinary reasoning, MathVista [80], MathVision [119], MathVerse [146] for mathematical reasoning, as well as DynaMath [155], WeMath [99] and LogicVista [131] for complementary evaluation on logical reasoning. + +As shown in Table 2, InternVL3 exhibits strong performance across all tested benchmarks. Specifically, on the MMMU benchmark, InternVL3-based models consistently outperform smaller-scale competitors. For instance, with increasing model size, InternVL3-78B reaches a score over 72 on MMMU, indicating robust understanding and reasoning capability in handling abstract multidisciplinary concepts. In the mathematical domain, InternVL3 demonstrates significant gains across various benchmarks. On MathVista, InternVL3-78B records a performance close to 79.0, while on MathVision and MathVerse, the results are also competitive, evidencing the model's enhanced ability to tackle challenging mathematical problems. Furthermore, performance on DynaMath, WeMath, and LogicVista consistently improves with scaling. The overall score—a mean calculated across all benchmarks—shows that InternVL3 models achieve a balanced enhancement across different aspects, surpassing many of the preceding open-source methods. + +A notable characteristic of InternVL3 is the efficiency of the best-of-N evaluation strategy [125]. When applying this method, even models with relatively smaller parameter sizes (e.g., InternVL3-1B and InternVL3-2B) exhibit substantial improvements in reasoning performance. Specifically, in the Vision-Only split of MathVerse, the best-of-8 strategy leads to increases of approximately 6.0 and 3.2 percentage points for InternVL3-38B and InternVL3-78B, respectively. This improvement underscores the effectiveness of test-time scaling. 
+ 

# 3.3 OCR, Chart, and Document Understanding

To assess the model's integrated vision-language understanding in tasks involving text, document, and chart comprehension, we perform a comprehensive evaluation over nine benchmarks, including AI2D [57], ChartQA [91], TextVQA [107], DocVQA [93], InfoVQA [92], OCRBench [76], SEED-2-Plus [61], CharXiv [128], and VCR [148]. As illustrated in Table 3, the InternVL3 series not only maintains robust performance across these benchmarks but also demonstrates competitive or superior results when compared to other open-source and closed-source counterparts. + 

At the 1B scale, InternVL3-1B achieves performance that is roughly on par with previous lower-scale models. At the 2B scale, InternVL3-2B not only improves its absolute scores—for instance, reaching 78.7/87.4 on AI2D and 88.3 on DocVQA—but also exhibits a performance edge over similarly parameterized models such as Qwen2-VL-2B [121]. Although its TextVQA performance (77.0) remains comparable to that of Qwen2-VL-2B, the enhancements in document and chart understanding suggest that the proposed native multimodal pre-training is particularly effective in tasks requiring precise visual-textual integration. + 

The benefits of the new pre-training protocol become even more pronounced at larger scales. Mid-scale models like InternVL3-8B and InternVL3-9B deliver substantial gains, with InternVL3-8B achieving 85.2/92.6 on AI2D, 92.7 on DocVQA, and VCR scores of 94.5/98.1. Moreover, when compared with heavyweight systems such as Qwen2-VL-72B [121] or even closed-source models like GPT-4o-20240513 [97], the high-scale variants + 

ModelMMMUMathVistaMathVisionMathVerseDynaMathWeMathLogicVistaOverall
LLaVA-OV-0.5B [60]31.434.8------
InternVL2.5-1B [18]41.247.121.116.45.611.126.024.1
InternVL3-1B43.445.818.818.75.813.429.825.1
w/ VisualPRM-Bo8 [125]55.462.121.728.913.428.534.935.0
Aquila-VL-2B [44]46.959.117.917.45.015.930.627.5
Qwen2.5-VL-3B [7]51.261.221.931.213.222.940.334.6
Ovis-2B [84]45.664.117.729.410.09.934.730.2
Ovis-4B [84]49.069.621.538.518.016.935.335.5
InternVL2.5-2B [18]43.251.114.022.34.48.027.324.3
InternVL2.5-4B [18]51.864.118.427.715.221.234.233.2
InternVL3-2B48.657.021.725.314.622.436.932.4
w/ VisualPRM-Bo8 [125]57.870.526.636.721.438.540.541.7
LLaVA-OV-7B [60]47.958.618.319.39.020.933.329.6
MiniCPM-V2.6 [135]49.860.823.418.99.816.427.529.5
MiniCPM-o2.6 [135]50.973.321.735.010.425.236.036.1
Ovis-8B [84]57.471.825.942.320.427.239.440.6
Qwen2.5-VL-8B [7]55.067.825.441.121.035.244.141.4
InternVL2.5-8B [18]56.264.517.022.89.423.536.032.8
InternVL3-8B62.771.629.339.825.537.144.144.3
w/ VisualPRM-Bo8 [125]66.075.237.546.328.548.149.750.2
InternVL3-9B57.771.527.635.326.733.849.243.1
w/ VisualPRM-Bo8 [125]63.776.233.945.829.146.650.649.4
Ovis2-16B [84]60.773.730.145.826.345.047.447.0
InternVL2.5-26B [18]60.768.223.424.011.430.939.636.9
InternVL3-14B67.175.137.244.431.343.051.249.9
w/ VisualPRM-Bo8 [125]69.377.940.147.733.152.056.253.8
Cambrian-34B [116]49.753.2------
VILA-1.5-40B [71]55.149.5------
Ovis2-34B [84]66.776.131.950.127.551.949.950.6
InternVL2.5-38B [18]63.971.932.236.920.038.347.944.4
InternVL3-38B70.175.134.248.235.348.658.452.8
w/ VisualPRM-Bo8 [125]71.079.441.854.236.155.258.456.6
GPT-4o-20241120 [97]70.760.031.240.634.545.852.847.9
Claude-3.7-Sonnet [3]75.066.841.946.739.749.358.253.9
Gemini-2.0-Flash [30]72.670.443.647.842.147.452.353.7
Gemini-2.0-Pro [29]69.971.348.167.343.356.553.258.5
LLaVA-OV-72B [60]55.767.125.327.215.632.040.937.7
QvQ-72B-Preview [115]70.370.334.948.230.739.058.250.2
Qwen2.5-VL-72B [7]68.274.239.347.335.949.155.752.8
InternVL2.5-78B [18]70.072.332.239.219.239.849.046.0
InternVL3-78B72.279.043.151.035.146.155.954.6
w/ VisualPRM-Bo8 [125]72.280.540.854.237.352.457.956.5
+ 

Table 2: Comparison of multimodal reasoning and mathematical performance. MMMU [141] is a multidisciplinary reasoning benchmark. MathVista [80], MathVision [119], MathVerse [146], DynaMath [155], and WeMath [99] are mathematics benchmarks. For MathVerse, we report the performance on Vision-Only split. LogicVista [131] is a logical reasoning benchmark. Part of the results are collected from the OpenCompass leaderboard [26]. The overall score is the average score of the above benchmarks. "w/ VisualPRM-Bo8" denotes that the model is evaluated with Best-of-8 settings, where VisualPRM [125] serves as the critic model. + 

of InternVL3—particularly InternVL3-38B and InternVL3-78B—push the envelope further. For instance, InternVL3-78B attains a remarkable OCRBench score of 906 and VCR scores of 96.0/98.6, clearly surpassing the corresponding metrics of comparable models. + 

# 3.4 Multi-Image Understanding

We evaluate the multi-image relation perception and understanding capabilities of InternVL3 across a suite of widely recognized benchmarks, including BLINK [39], Mantis-Eval [51], MMIU [95], MuirBench [118], MMT-Bench [137], and MIRB [153], as presented in Table 4. These benchmarks comprehensively assess skills such as cross-image reasoning and context integration, all of which are crucial for effective multimodal interaction. + 

InternVL3 consistently outperforms its earlier counterparts across different parameter scales. For instance, at the 1B scale, InternVL3-1B exhibits a modest yet consistent improvement over preceding models, achieving a BLINK score of 42.9 and an MMT-Bench score of 52.9. The performance gains become even more pronounced + 

Model NameAI2D (w / wo M)ChartQA (test avg)TextVQA (val)DocVQA (test)InfoVQA (test)OCR BenchSEED-2 PlusCharXiv (RQ / DQ)VCR-EN-Easy (EM / Jaccard)Overall
LLaVA-OneVision-0.5B [60]57.1 / -61.4-70.041.8565----
InternVL2-1B [19]64.1 / 70.572.970.581.750.975454.318.1 / 30.721.5 / 48.454.9
InternVL2.5-1B [18]69.3 / 77.875.972.084.856.078559.019.0 / 38.491.5 / 97.068.3
InternVL3-1B69.4 / 78.375.374.181.953.779058.221.0 / 47.189.3 / 96.268.6
Qwen2-VL-2B [121]74.7 / 84.673.579.790.165.580962.4-81.5 / --
Qwen2.5-VL-3B [7]81.6 / -84.079.393.977.179767.631.3 / 58.6--
Aquila-VL-2B [44]75.0 / -76.576.485.058.377263.0-70.0 / --
InternVL2-2B [19]74.1 / 82.376.273.486.958.978460.021.0 / 40.632.9 / 59.262.0
InternVL2.5-2B [18]74.9 / 83.579.274.388.760.980460.921.3 / 49.793.2 / 97.672.1
InternVL3-2B78.7 / 87.480.277.088.366.183564.628.3 / 54.791.2 / 96.974.7
Ovis1.6-Gemma2-9B [84]84.4 / -----830----
MiniCPM-V2.6 [135]82.1 / -82.480.190.8-85265.731.0 / 57.173.9 / 85.7-
Molmo-7B-D [31]- / 93.284.181.792.272.6694----
Qwen2-VL-7B [121]83.0 / 92.183.084.394.576.586669.0-89.7 / 93.8-
Qwen2.5-VL-7B [7]83.9 / -87.384.995.782.686470.442.5/73.9--
InternVL2-8B [19]83.8 / 91.783.377.491.674.879467.531.2 / 56.137.9 / 61.569.7
InternVL2.5-8B [18]84.5 / 92.884.879.193.077.682269.732.9 / 68.692.6 / 97.479.6
InternVL3-8B85.2 / 92.686.680.292.776.888069.737.6 / 73.694.5 / 98.181.3
InternVL3-9B84.6 / 92.986.279.493.679.687768.838.0 / 72.594.2 / 97.981.3
InternVL3-14B86.0 / 93.787.380.594.183.687570.343.1 / 82.294.8 / 98.283.4
InternVL-Chat-V1.5 [19]80.7 / 89.883.880.690.972.572466.329.2 / 58.514.7 / 51.465.9
InternVL2-26B [19]84.5 / 92.584.982.392.975.982567.633.4 / 62.474.5 / 86.776.7
InternVL2.5-26B [18]86.4 / 94.487.282.494.079.885270.835.9 / 73.594.4 / 98.081.8
Qwen2.5-VL-32B [7]---94.883.4-----
Cambrian-34B [116]79.5 / -75.676.775.546.0600-27.3 / 59.779.7 / 89.3-
VILA-1.5-40B [71]69.9 / -67.273.6--460-24.0 / 38.7--
InternVL2-40B [19]86.6 / 94.586.283.093.978.783769.232.3 / 66.084.7 / 92.679.3
InternVL2.5-38B [18]87.6 / 95.188.282.795.383.684271.242.4 / 79.694.7 / 98.283.6
InternVL3-38B88.9 / 95.589.283.995.485.088671.646.4 / 87.296.1 / 98.785.5
GPT-4V [97]78.2 / 89.478.578.088.475.164553.837.1 / 79.952.0 / 65.470.0
GPT-4o-20240513 [97]84.6 / 94.285.777.492.879.273672.047.1 / 84.591.6 / 96.481.6
Claude-3-Opus [3]70.6 / 88.180.867.589.355.669444.230.2 / 71.662.0 / 77.767.3
Claude-3.5-Sonnet [3]81.2 / 94.790.874.195.274.378871.760.2 / 84.363.9 / 74.778.7
Gemini-1.5-Pro [102]79.1 / 94.487.278.893.181.0754-43.3 / 72.062.7 / 77.7-
LLaVA-OneVision-72B [60]85.6 / -83.780.591.374.9741----
NVLM-D-72B [28]85.2 / 94.286.082.192.6-853----
Molmo-72B [31]- / 96.387.383.193.581.9-----
Qwen2-VL-72B [121]88.1 / -88.385.596.584.5877--91.3 / 94.6-
Qwen2.5-VL-72B [7]88.7 / -89.583.596.487.388573.049.7 / 87.4--
InternVL2-Llama3-76B [19]87.6 / 94.888.484.494.182.083969.738.9 / 75.283.2 / 91.381.1
InternVL2.5-78B [18]89.1 / 95.788.383.495.184.185471.342.4 / 82.395.7 / 94.583.9
InternVL3-78B89.7 / 96.089.784.395.486.590671.946.0 / 85.196.0 / 98.685.8
+ +Table 3: Comparison of OCR, chart, and document understanding performance. We evaluate OCR-related capabilities across 9 benchmarks, including AI2D [57], ChartQA [91], TextVQA [107], DocVQA [93], InfoVQA [92], OCRBench [76], SEED-2-Plus [61], CharXiv [128], and VCR [148]. Part of results are collected from [34, 31, 3, 128, 148] and the OpenCompass leaderboard [26]. + +at the 2B scale; InternVL3-2B attains a remarkable 65.9 on Mantis-Eval, representing an improvement of over 11 points relative to InternVL2.5-2B, and also boosts its MMT-Bench performance to 59.5. Such enhancements indicate that the advanced pre-training strategies and enhanced training datasets in InternVL3 significantly elevate its capability to capture and reason over inter-image relationships. + +At higher scales, the trend continues. InternVL3-8B and its subsequent larger variants not only secure steady improvements on BLINK and MMT-Bench but also demonstrate substantial gains on the MIRB and MuirBench benchmarks. In particular, InternVL3-78B reaches a BLINK score of 66.3 and an MMT-Bench score of 73.2, positioning it as a competitive alternative to leading closed-source models like GPT-4o. These results suggest that the learning multimodal capabilities via native multimodal pre-training and the scaling of model parameters are key contributors to the elevated performance observed across diverse evaluation settings. Despite these encouraging outcomes, a noticeable performance gap between our InternVL3 and other MLLMs like Qwen2.5-VL still exists on certain benchmarks, such as MuirBench, implying that future work may benefit from further enhancements in training data curation and additional model refinements. + +
Model NameBLINK (val)Mantis EvalMMIUMuir BenchMMT (val)MIRB (avg)OverallRealWorld QAMME-RW (EN)WildVision (win rate)R-Bench (dis)Overall
LLaVA-OneVision-0.5B [60]52.139.6-25.5---55.6----
InternVL2-1B [19]38.646.137.329.349.531.538.750.340.217.855.641.0
InternVL2.5-1B [18]42.051.238.529.950.335.641.357.544.243.459.051.0
InternVL3-1B42.950.239.331.252.936.142.158.246.043.860.452.1
Qwen2-VL-2B [121]44.4---55.1--62.6----
Qwen2.5-VL-3B [6]47.6--47.7---65.453.1---
InternVL2-2B [19]43.848.439.832.550.432.141.257.347.331.856.848.3
InternVL2.5-2B [18]44.054.843.540.654.536.445.660.148.844.262.253.8
InternVL3-2B50.365.943.038.859.542.950.164.353.848.867.558.6
Qwen2-VL-7B [121]53.2---64.0--70.156.5-64.0-
Qwen2.5-VL-7B [6]56.4--59.6---68.557.4---
MiniCPM-V2.6 [135]53.069.0--60.8--65.0----
InternVL2-8B [19]50.965.442.048.760.050.052.864.453.554.467.960.1
InternVL2.5-8B [18]54.867.746.751.162.352.555.970.159.162.070.165.3
InternVL3-8B55.570.146.855.065.056.858.270.862.069.874.169.2
InternVL3-9B58.670.150.451.465.458.659.170.561.363.870.366.5
InternVL3-14B60.376.050.956.270.359.362.270.764.069.869.368.5
InternVL-Chat-V1.5 [19]46.666.837.438.558.050.349.666.049.456.667.960.0
InternVL2-26B [19]56.269.642.650.660.653.755.668.358.762.270.164.8
InternVL2.5-26B [18]61.875.649.461.166.955.761.874.561.865.272.968.6
Cambrian-34B [116]-------67.844.1---
InternVL2-40B [19]57.271.447.954.466.255.258.771.861.863.273.367.5
InternVL2.5-38B [18]63.278.355.362.770.061.265.173.564.066.472.169.0
InternVL3-38B64.077.957.463.871.862.366.275.667.371.673.372.0
GPT-4V [97]54.662.7-62.364.353.1-61.4-71.865.6-
GPT-4o-20240513 [97]68.0-55.768.065.4--75.445.280.677.769.7
Claude-3.5-Sonnet [3]--53.4----60.151.6---
Gemini-1.5-Pro [102]--53.4-64.5--67.538.2---
LLaVA-OneVision-72B [60]55.477.6-54.8---71.9----
Qwen2-VL-72B [121]----71.8--77.8----
Qwen2.5-VL-72B [6]64.4--70.7---75.763.2---
InternVL2-Llama3-76B [19]56.873.744.251.267.458.258.672.263.065.874.168.8
InternVL2.5-78B [18]63.877.055.863.570.861.165.378.762.971.477.272.6
InternVL3-78B66.379.360.464.573.264.368.078.065.473.677.473.6
+ +Table 4: Comparison of multi-image and real-world understanding performance. Multi-image benchmarks include BLINK [39], Mantis-Eval [51], MMIU [95], MuirBench [118], MMT-Bench [137], and MIRB [153]. Real-world benchmarks encompass RealWorldQA [27], MME-RealWorld [151], WildVision [86], and R-Bench [62]. Part of the results are sourced from the benchmark papers and the OpenCompass leaderboard [26]. + +# 3.5 Real-World Comprehension + +We evaluate the InternVL3 series on four real-world comprehension benchmarks—RealWorldQA [27], MME-RealWorld [151], WildVision [86], and R-Bench [62]—to assess its ability to tackle realistic and complex tasks. As shown in Table 4, even the smallest variant in the InternVL3 family (InternVL3-1B) demonstrates promising performance with a RealWorldQA score of 58.2, an MME-RealWorld score of 46.0, a WildVision win rate of 43.8, and an R-Bench score of 60.4. Scaling up the model yields further enhancements across all metrics. Mid-sized variants such as InternVL3-8B and InternVL3-14B continue this positive trend, with InternVL3-8B reporting a RealWorldQA score of 70.8 and an R-Bench score of 74.1. These improvements highlight the effectiveness of scaling, as larger models provide more robust representations and enhanced comprehension capabilities in real-world scenarios. + +At the higher end of the scale, the InternVL3-38B and InternVL3-78B models achieve top-tier results among the InternVL3 series. Notably, InternVL3-78B records a RealWorldQA score of 78.0, an MME-RealWorld score of 65.4, a WildVision win rate of 73.6, and an R-Bench score of 77.4. When compared with competitive models, such as GPT-4o [97]—which scores 75.4 on RealWorldQA and 80.6 on WildVision—the InternVL3 series exhibits competitive strengths. 
InternVL3-78B not only surpasses GPT-4o on RealWorldQA and closely matches its R-Bench performance but also considerably outperforms it on MME-RealWorld, indicating an overall robust performance on tasks demanding both perceptual precision and comprehensive understanding. + +
Model NameMME (sum)MMB (EN / CN)MMBv1.1 (EN)MMVet (turbo)MMVet2 (0613)MMStarOverallHallBench (avg)MMHal (score)CRPE (relation)POPE (avg)Overall
LLaVA-OneVision-0.5B [60]1438.061.6 / 55.559.632.2-37.7-27.9----
InternVL2-1B [19]1794.465.4 / 60.761.632.736.145.751.734.02.2557.587.345.3
InternVL2.5-1B [18]1950.570.7 / 66.368.448.843.250.158.939.02.4960.989.948.1
InternVL3-1B1934.472.6 / 67.969.959.547.551.561.941.42.5964.090.749.7
Qwen2-VL-2B [121]1872.074.9 / 73.572.249.5-48.0-41.7----
Qwen2.5-VL-3B [6]215779.1 / 78.177.461.8-55.9-46.3-73.6--
InternVL2-2B [19]1876.873.2 / 70.970.239.539.650.158.037.92.5266.388.348.8
InternVL2.5-2B [18]2138.274.7 / 71.972.260.852.353.765.342.62.9470.290.651.6
InternVL3-2B2221.281.1 / 78.478.662.253.960.769.842.53.2671.589.651.7
Qwen2-VL-7B [121]2326.883.0 / 80.580.762.0-60.7-50.63.4074.488.154.1
Qwen2.5-VL-7B [6]234783.5 / 83.482.667.1-63.9-52.9-76.4--
MiniCPM-V2.6 [135]2348.481.5 / 79.378.060.0-57.5-48.13.6075.287.353.6
InternVL2-8B [19]2210.381.7 / 81.279.554.252.362.069.245.23.3375.886.952.8
InternVL2.5-8B [18]2344.184.6 / 82.683.262.858.162.873.250.13.6578.490.655.7
InternVL3-8B2415.483.4 / 82.281.781.366.368.277.749.93.6176.391.155.2
InternVL3-9B2372.883.4 / 82.281.776.265.466.376.351.23.4775.090.455.0
InternVL3-14B2478.385.6 / 84.183.580.268.468.879.055.13.4977.390.256.5
InternVL-Chat-V1.5 [19]2194.282.2 / 82.080.361.551.557.369.750.33.1175.488.454.3
InternVL2-26B [19]2260.783.4 / 82.081.562.157.261.271.850.73.5575.688.054.5
InternVL2.5-26B [18]2373.385.4 / 85.584.265.060.866.575.255.03.7079.190.657.1
Cambrian-34B [116]-80.4 / 79.278.353.2-54.2-41.6----
InternVL2-40B [19]2307.586.8 / 86.585.165.563.865.475.756.93.7577.688.456.7
InternVL2.5-38B [18]2455.886.5 / 86.385.568.862.167.977.056.83.7178.390.757.4
InternVL3-38B2523.687.6 / 86.886.983.969.671.581.557.13.7777.190.657.1
GPT-4V [97]1926.681.0 / 80.280.067.566.356.070.746.5----
GPT-4o-20240513 [97]-83.4 / 82.183.169.171.064.7-55.04.0076.686.955.6
Claude-3-Opus [3]1586.863.3 / 59.260.151.755.845.755.537.8----
Claude-3.5-Sonnet [3]-82.6 / 83.580.970.171.865.1-55.5----
Gemini-1.5-Pro [102]-73.9 / 73.874.664.066.959.1-45.6----
LLaVA-OneVision-72B [60]2261.085.8 / 85.385.060.6-65.8-49.0----
Qwen2-VL-72B [121]2482.786.5 / 86.685.974.066.968.378.758.1----
Qwen2.5-VL-72B [6]2448.088.6 / 87.988.476.2-70.8-55.2-79.2--
InternVL2-Llama3-76B [19]2414.786.5 / 86.385.565.768.467.477.255.23.8377.689.056.4
InternVL2.5-78B [18]2494.588.3 / 88.587.472.365.569.579.257.43.8978.890.857.7
InternVL3-78B2549.889.0 / 88.787.781.370.072.582.059.13.8579.290.358.1
+ +Table 5: Comparison of comprehensive multimodal understanding and hallucination performance. Comprehensive multimodal benchmarks include MME [37], MMBench series [75], MMVet series [138, 139], and MMStar [13]. Hallucination benchmarks encompass HallusionBench [45], MMHal [111], CRPE [126], and POPE [67]. Part of the results are sourced from the benchmark papers and the OpenCompass leaderboard [26]. + +# 3.6 Comprehensive Multimodal Evaluation + +The comprehensive multimodal evaluation is based on established benchmarks including MME [37], MMBench (evaluating both English and Chinese tasks) [75], MMBench v1.1 (English) [75], MMVet [138], MMVet v2 [139], and MMStar [13], as summarized in Table 5. Specifically, InternVL3-1B achieves an MMBench score of 72.6/67.9 (English/Chinese) and improves the MMBench v1.1 score to 69.9, compared to the InternVL2.5-1B baseline (70.7/66.3 and 68.4, respectively). The improvements become more pronounced at the 2B scale, where InternVL3-2B records an MME of 2221.2 and reaches an MMBench performance of 81.1/78.4, along with an MMBench v1.1 score of 78.6. + +At larger scales, InternVL3 models consistently demonstrate superior performance. For example, the InternVL3-8B model achieves an MME of 2415.4, while the InternVL3-38B and InternVL3-78B models record MME scores of 2523.6 and 2549.8, respectively. The corresponding MMBench and MMBench v1.1 scores also show steady improvements, with InternVL3-78B attaining 89.0/88.7 for English/Chinese and 87.7 for English-only tasks. When compared with other competitive models, such as Qwen2-VL-72B and Qwen2.5-VL-72B, the InternVL3 series—especially the 78B variant—offers a consistent performance advantage on the multimodal understanding benchmarks. + +
Model NameRefCOCORefCOCO+RefCOCOg
valtest-Atest-Bvaltest-Atest-Bvaltest
Grounding-DINO-L [74]90.693.288.282.889.075.986.187.086.6
UNINEXT-H [133]92.694.391.585.289.679.888.789.488.9
ONE-PEACE [122]92.694.289.388.892.283.289.289.389.8
Qwen2.5-VL-3B [6]89.191.784.082.488.074.185.285.785.0
InternVL3-1B85.890.181.776.684.169.282.882.681.6
InternVL3-2B89.892.686.484.089.276.587.687.286.7
Shikra-7B [12]87.090.680.281.687.472.182.382.282.9
Ferret-v2-13B [144]92.695.088.987.492.181.489.490.089.6
CogVLM-Grounding [123]92.894.889.088.792.983.489.890.890.3
MM1.5 [143]-92.586.7-88.777.8-87.1-
Qwen2-VL-7B [121]91.793.687.385.890.579.587.387.887.9
Qwen2.5-VL-7B [6]90.092.585.484.289.176.987.287.286.6
TextHawk2 [140]91.993.087.686.290.080.488.288.188.2
InternVL2-8B [19]87.191.180.779.887.971.482.782.782.9
InternVL2.5-8B [18]90.394.585.985.291.578.886.787.687.6
InternVL3-8B92.594.688.088.292.581.889.690.089.6
InternVL3-9B91.893.286.686.491.079.988.088.588.2
InternVL3-14B92.094.487.887.492.181.588.689.389.1
Qwen2-VL-72B [121]93.295.390.790.193.885.689.990.491.1
Qwen2.5-VL-72B [6]92.794.689.788.992.283.789.990.390.3
InternVL2-Llama3-76B [19]92.294.888.488.893.182.889.590.390.0
InternVL2.5-78B [18]93.795.692.590.494.786.992.792.292.3
InternVL3-38B93.295.190.289.893.285.291.491.591.2
InternVL3-78B93.495.490.390.193.885.391.591.591.4
+ +Table 6: Comparison of visual grounding performance. We evaluate InternVL's visual grounding capability on RefCOCO, RefCOCO+, and RefCOCOg datasets [56, 88]. Parts of the results are collected from [121]. + +# 3.7 Multimodal Hallucination Evaluation + +We evaluate InternVL's propensity for hallucinations on four established benchmarks—HallusionBench [45], MMHal-Bench [111], CRPE [126], and POPE [67]—as detailed in Table 5. In comparison with previous InternVL series, the new InternVL3 models demonstrate overall competitive performance across varying scales, while providing consistent improvements in handling multimodal hallucination challenges. In the small-parameter regime, InternVL3-1B attains a HallusionBench score of 41.4, representing an appreciable gain over the InternVL2.5-1B baseline, which scored 39.0. Similarly, the 2B variant of InternVL3 shows a comparable HallusionBench performance (42.5) to its InternVL2.5 counterpart (42.6), while registering a modest improvement in CRPE performance (71.5 vs. 70.2). + +In the large-scale setting, InternVL3-38B and InternVL3-78B are particularly noteworthy. InternVL3-38B obtains a HallusionBench score of 57.1, while InternVL3-78B reaches 59.1, accompanied by a CRPE improvement to 79.2. These figures position the InternVL3 series as competitive with leading closed- and open-source models such as GPT-4o and the Qwen2.5-VL series. Despite these advancements, minor declines on certain benchmarks, such as MMHal, indicate that although the InternVL3 series has made overall progress, optimizing data and training strategies to achieve more consistent improvements remains an important direction for future work. + +# 3.8 Visual Grounding + +We evaluate InternVL's visual grounding capability on the RefCOCO [56], RefCOCO+[56], and RefCOCOg[88] datasets, where the model is tasked with accurately localizing target objects in images from given textual descriptions. 
Table 6 shows a comprehensive comparison across various models, including several specialized grounding models as well as multiple MLLMs. + 

Among the smaller-scale models, we observe that while Qwen2.5-VL-3B achieves an average score of 85.0, the InternVL3-1B and InternVL3-2B models yield average scores of 81.6 and 86.7, respectively. Notably, when scaling up, the InternVL3 series exhibits promising improvements. InternVL3-8B, InternVL3-9B, and InternVL3-14B yield average scores around 88.2–89.6, reflecting a consistent trend of performance gains as the model size increases. However, when reaching larger scales, the performance gains appear to plateau. For instance, InternVL2.5-78B reaches an average score of 92.3, and InternVL3-78B only shows a score of 91.4. We speculate that this is because InternVL3's training data expansion does not include additional grounding-specific data and the relative reduction in grounding-targeted data could have restricted the localization capabilities. + 

Model NameMMMBMultilingual MMBenchMTVQA (avg)Overall
enzhptartrruenzhptartrru
InternVL2-1B [19]73.267.455.553.543.855.267.961.250.843.331.852.712.640.7
InternVL2.5-1B [18]78.870.261.555.045.361.172.564.757.043.037.853.221.446.0
InternVL3-1B79.470.162.358.047.661.972.666.262.348.039.560.322.247.9
Qwen2-VL-2B [121]78.374.272.668.361.872.872.171.169.961.154.469.320.052.6
Qwen2.5-VL-3B [6]------------24.8-
InternVL2-2B [19]79.471.654.043.546.448.173.869.651.429.831.342.310.939.3
InternVL2.5-2B [18]81.474.458.248.346.453.276.571.655.937.333.944.821.845.2
InternVL3-2B81.978.375.468.662.974.681.377.875.966.459.570.726.757.4
mPLUG-Owl2 [136]67.361.059.745.845.462.666.259.458.237.947.760.4--
Qwen2-VL-7B [121]83.982.481.279.074.782.481.881.679.175.674.579.325.661.6
Qwen2.5-VL-7B [6]------------29.2-
InternVL2-8B [19]83.481.576.166.369.275.782.981.876.060.566.074.420.956.6
InternVL2.5-8B [18]84.383.178.669.371.579.583.883.279.464.367.877.327.660.4
InternVL3-8B85.183.182.581.676.283.485.585.683.279.275.982.630.264.7
InternVL3-9B84.883.780.669.968.580.886.585.279.164.368.379.127.160.7
InternVL3-14B85.784.783.183.779.383.686.785.883.281.180.783.831.666.2
InternVL-Chat-V1.5 [19]82.680.876.365.268.674.081.180.276.956.266.771.020.555.7
InternVL2-26B [19]83.881.778.068.869.376.382.781.877.861.969.674.417.756.2
InternVL2.5-26B [18]86.283.881.673.373.782.886.185.580.767.575.079.628.562.6
InternVL2-40B [19]85.384.181.170.374.281.486.285.882.864.074.281.820.659.7
InternVL2.5-38B [18]86.485.184.184.382.884.987.588.685.384.584.085.931.767.4
InternVL3-38B86.785.684.584.882.685.189.089.387.184.684.387.432.468.1
GPT-4V [97]75.074.271.573.569.073.177.674.472.572.370.574.822.056.1
GPT-4o [97]------------27.8-
Gemini-1.0-Pro [114]75.071.970.669.969.672.773.672.170.361.169.870.5--
Qwen2-VL-72B [121]86.885.385.284.884.285.386.987.285.883.584.485.330.967.2
Qwen2.5-VL-72B [6]------------31.7-
InternVL2-Llama3-76B [19]85.385.182.882.883.083.787.887.385.983.185.085.722.063.9
InternVL2.5-78B [18]86.385.685.184.883.185.490.089.787.483.384.986.331.968.0
InternVL3-78B87.286.685.586.584.686.189.490.388.786.186.688.132.568.9
+ +Table 7: Comparison of multimodal multilingual performance. We evaluate multilingual capabilities across 3 benchmarks, including MMMB [109], Multilingual MMBench [109] and MTVQA [113]. The languages evaluated are English (en), Chinese (zh), Portuguese (pt), Arabic (ar), Turkish (tr), and Russian (ru). + +# 3.9 Multimodal Multilingual Understanding + +We assess InternVL's multimodal multilingual understanding capabilities using benchmarks—MMMB, Multilingual MMBench [109], and MTVQA [113]—as shown in Table 7. The InternVL3 series demonstrates consistent improvements in multilingual performance compared to previous predecessors. For example, the lightweight InternVL3-1B already shows a modest improvement over InternVL2.5-1B, while the larger-scale variants, such as InternVL3-38B and InternVL3-78B, achieve significantly higher average scores across all three benchmarks. + +Comparisons with other leading models further highlight the effectiveness of the InternVL3 series. Notably, the InternVL3 variants achieve performance that is competitive with or superior to models such as Qwen2-VL-72B [121] and Qwen2.5-VL-72B [6]. Overall, the enhanced performance of the InternVL3 series across MMMB, Multilingual MMBench, and MTVQA underscores the promise of our approach in advancing global multimodal applications. + +# 3.10 Video Understanding + +Video understanding is essential for evaluating how well MLLMs capture temporal and multimodal cues in complex video content. In this work, we assess the InternVL3 series on six established benchmarks—Video-MME [38], MVBench [65], MMBench-Video [35], MLVU [154], LongVideoBench [129], and CG-Bench [2], as detailed in Table 8. + +Overall, the InternVL3 models demonstrate clear performance improvements and a strong scalability trend over their predecessors. As the model capacity increases, the performance gains become more pronounced. 
For instance, InternVL3-2B records higher Video-MME scores (58.9/61.4) and improved MVBench and MLVU performance compared to the earlier 2B variants. + +
Model NameVideo-MME (wo / w sub)MVBenchMMBench-Video (val)MLVU (M-Avg)LongVideoBench (val total)CG-Bench (long / clue acc.)Overall
InternVL2-1B [19]42.9 / 45.457.51.1451.643.3--
InternVL2.5-1B [18]50.3 / 52.364.31.3657.347.9--
InternVL3-1B51.0 / 53.063.11.353.048.124.8 / 39.146.9
Qwen2-VL-2B [121]55.6 / 60.463.2-----
Qwen2.5-VL-3B [7]61.5 / 67.667.01.6368.243.3--
InternVL2-2B [19]46.2 / 49.160.21.3054.346.0--
InternVL2.5-2B [18]51.9 / 54.168.81.4461.452.0--
InternVL3-2B58.9 / 61.470.41.4264.255.430.8 / 50.754.9
VideoChat2-HD [64]45.3 / 55.762.31.2247.9---
MiniCPM-V-2.6 [135]60.9 / 63.6-1.70-54.9--
LLaVA-OneVision-7B [60]58.2 / -56.7-----
Qwen2-VL-7B [121]63.3 / 69.067.01.44-55.6--
Qwen2.5-VL-7B [7]65.1 / 71.669.61.7970.245.3--
InternVL2-8B [19]56.3 / 59.365.81.5764.054.6--
InternVL2.5-8B [18]64.2 / 66.972.01.6868.960.0--
InternVL3-8B66.3 / 68.975.41.6971.458.838.6 / 55.261.4
InternVL3-9B66.7 / 68.974.31.6970.862.541.1 / 58.062.3
InternVL3-14B70.4 / 73.076.61.7373.363.944.1 / 60.664.9
InternVL2-26B [19]57.0 / 60.267.51.6764.256.1--
InternVL2.5-26B66.9 / 69.275.21.8672.359.9--
Oryx-1.5-32B [78]67.3 / 74.970.11.5272.3---
Qwen2.5-VL-32B [7]70.5 / 77.9-1.93----
VILA-1.5-40B [71]60.1 / 61.1-1.6156.7---
InternVL2-40B [19]66.1 / 68.672.01.7871.060.6--
InternVL2.5-38B [18]70.7 / 73.174.41.8275.363.3--
InternVL3-38B72.7 / 75.076.91.8177.867.346.9 / 62.867.5
GPT-4V/4T [1]59.9 / 63.343.71.5349.259.1--
GPT-4o-20240513 [97]71.9 / 77.2-1.6364.666.7--
GPT-4o-20240806 [97]--1.87--41.8 / 58.3-
Gemini-1.5-Pro [102]75.0 / 81.3-1.30-64.040.1 / 56.4-
VideoLLaMA2-72B [23]61.4 / 63.162.0-----
LLaVA-OneVision-72B [60]66.2 / 69.559.4-66.461.3--
Qwen2-VL-72B [121]71.2 / 77.873.61.70--41.3 / 56.2-
Qwen2.5-VL-72B [7]73.3 / 79.170.42.0274.660.7--
InternVL2-Llama3-76B [19]64.7 / 67.869.61.7169.961.1--
InternVL2.5-78B [18]72.1 / 74.076.41.9775.763.642.2 / 58.566.0
InternVL3-78B72.7 / 75.778.71.8179.565.748.4 / 65.368.3
+ +Table 8: Comparison of video understanding performance. We evaluate InternVL's video understanding capabilities across 6 benchmarks. For Video-MME [38], MMBench-Video [35], MLVU [154], and LongVideoBench [129], we test with four different settings: 16, 32, 48, and 64 frames, and report the maximum results. For MVBench [65], we conduct testing using 16 frames. For CG-Bench [2], we use 32 frames. + +The scaling behavior of the InternVL3 series is further evident in the larger models. InternVL3-14B attains a Video-MME score of 70.4/73.0, while InternVL3-38B and InternVL3-78B push these metrics even higher, reaching scores of 72.7/75.0 and 72.7/75.7, respectively. Additionally, the inclusion of CG-Bench evaluations for the InternVL3 series provides further insight into long-range video reasoning, with performance steadily improving as model size increases—for example, InternVL3-78B attains 48.4/65.3 on CG-Bench. + +When compared with other open-source models, the InternVL3 series demonstrates competitive advantages. For instance, while Qwen2.5-VL models achieve impressive results (with Qwen2.5-VL-72B scoring 73.3/79.1 on Video-MME), the InternVL3 series tends to outperform them in other metrics, such as MVBench and MLVU. Similarly, while closed-source systems like Gemini-1.5-Pro sometimes yield superior results on select benchmarks (e.g., Video-MME), the overall performance of InternVL3, especially at larger scales, is highly competitive. + +# 3.11 GUI Grounding + +GUI grounding requires precise localization and understanding of interface elements, which is critical for applications like automated UI testing and assistive technologies. In Table 9, we report the performance on GUI grounding benchmarks, comparing InternVL3 with state-of-the-art multimodal and GUI-specific models. The results demonstrate that InternVL3 achieves competitive performance across different scales. On + +
MethodGPT-4oGemini 2.0ClaudeAguvis-72BQwen2.5-VL-72BUI-TARS-72BInternVL3-8B-38B-72B
ScreenSpot18.184.083.089.287.188.479.585.688.7
ScreenSpot-V2-----90.381.488.390.9
+ +Table 9: Performance of InternVL3 and other models on GUI grounding benchmarks. + +
Model NameObj.countAbs.Dist.Obj.sizeRoom SizeRel.Dist.Rel.Dir.Route PlanAppr.OrderOverall
GPT-4o [97]46.25.343.838.237.041.331.528.534.0
Gemini-1.5 Pro [102]56.230.964.143.651.346.336.034.645.4
VILA-1.5-8B [71]17.421.850.318.832.134.831.024.828.9
LongVA-7B [145]38.016.638.922.233.143.325.415.729.2
LLaVA-NeXT-Video-7B [150]48.514.047.824.243.542.434.030.635.6
LLaVA-OneVision-7B [60]47.720.247.412.342.535.229.424.432.4
InternVL3-8B68.139.048.433.648.336.427.335.442.1
InternVL3-38B71.750.246.141.753.538.628.960.748.9
LLaVA-NeXT-Video-72B [150]48.922.857.435.342.436.735.048.640.9
LLaVA-OneVision-72B [60]43.523.957.637.542.539.932.544.640.2
InternVL3-78B71.253.744.439.555.939.528.954.548.4
+ +Table 10: Performance of InternVL3 and other models on VSI-Bench. + +ScreenSpot [22], InternVL3-72B achieves $88.7\%$ accuracy, slightly outperforming UI-TARS-72B [100] $(88.4\%)$ and Qwen2.5-VL-72B $(87.1\%)$ , while Aguvis-72B [132] leads with $89.2\%$ . Notably, InternVL3-38B $(85.6\%)$ surpasses GPT-4o $(18.1\%)$ and Gemini 2.0 $(84.0\%)$ by a significant margin. + +For the more challenging ScreenSpot-V2 [130] benchmark, InternVL3 exhibits strong scaling behavior: InternVL3-72B achieves $90.9\%$ , outperforming UI-TARS-72B $(90.3\%)$ . The 8B variant already reaches $81.4\%$ , while the 38B model $(88.3\%)$ further closes the gap to the 72B version. These results highlight InternVL3's robustness in GUI understanding tasks, particularly in handling complex screen layouts and dynamic interfaces. The performance improvements with model scale suggest that larger architectures better capture the fine-grained visual-textual alignments required for precise GUI grounding. The superior performance of the InternVL3 models highlights their robustness in interpreting complex visual layouts. Future work will explore extending these capabilities to more dynamic and interactive GUI environments. + +# 3.12 Spatial Reasoning + +Spatial reasoning involves constructing a mental representation of a three-dimensional environment from visual inputs—a capability that is vital for applications such as autonomous driving. Table 10 reports the performance results on the Visual-Spatial Intelligence Benchmark (VSI-Bench) [134], where InternVL3 is compared against other state-of-the-art MLLMs. The results clearly indicate that InternVL3 outperforms its competitors in spatial reasoning tasks. In particular, the InternVL3-8B variant achieves a score of 42.1, leading all open-source MLLMs in the benchmark. 
Moreover, the InternVL3-38B and InternVL3-78B variants score 48.9 and 48.4, respectively—both superior to proprietary models such as GPT-4o, Gemini-1.5 Flash, and Gemini-1.5 Pro. + +Furthermore, InternVL3 exhibits exceptional performance in several sub-category tasks within the benchmark. It attains a score of 71.2 in object counting, 53.7 in absolute distance estimation, 55.9 in relative distance estimation, and 54.5 in appearance order prediction, demonstrating its robust spatial reasoning capabilities. These promising results underscore the potential of InternVL3 for advancing 3D scene understanding, and future work will explore its integration into various downstream applications. + +# 3.13 Evaluation on Language Capability + +Table 11 presents the performance evaluation of language capabilities across a diverse array of benchmarks. These benchmarks cover comprehensive assessments in general knowledge, linguistic understanding, reasoning, mathematics, and coding tasks, such as MMLU [46], CMMLU [63], C-Eval [48], GAOKAO-Bench [149], TriviaQA [52], NaturalQuestions [58, 110], RACE [59], WinoGrande [103], HellaSwag [142], BigBench Hard [112], GSM8K-Test [25], MATH [47], TheoremQA [17], HumanEval [14], MBPP [4], and MBPP-CN [4]. + +In particular, the experiments conducted compare the performance of Qwen2.5 chat models against corresponding InternVL3 variants. Both model series share the same pre-trained Qwen2.5 base model as their initialization. After undergoing native multimodal pre-training followed by additional post-training, the In + +
DatasetVersionQwen2.5-0.5B ChatQwen2.5-1.5B ChatQwen2.5-7B ChatQwen2.5-14B ChatQwen2.5-32B ChatQwen2.5-72B Chat
InterVL3-1BInterVL3-2BInterVL3-8BInterVL3-14BInterVL3-38BInterVL3-78B
MMLU4d595a46.449.861.864.874.277.379.582.183.385.484.486.9
CMMLUc1336547.256.762.972.278.884.482.685.885.888.787.489.9
C-Eval2daf2453.559.066.273.377.884.581.485.686.589.288.189.5
GAOKAO4c31db30.946.653.767.781.389.586.991.290.893.591.093.1
TriviaQA2121ce24.221.539.841.255.851.565.167.465.870.174.074.7
NaturalQuestions3dceal8.28.515.215.917.928.219.731.419.731.023.839.0
C38c358f35.266.381.284.790.895.192.196.392.397.496.197.6
RACE-High69ee4f51.568.876.084.686.890.889.693.091.594.291.794.2
WinoGrandeb3677047.252.956.561.971.578.179.184.383.886.783.987.8
HellaSwage4271039.347.062.073.885.490.290.593.092.195.592.795.6
BBH5b92b021.534.539.752.065.777.473.082.585.587.785.485.2
GSM8K1d7fe439.047.261.672.580.183.182.488.484.789.788.290.5
MATH39342427.832.749.357.372.672.273.776.381.172.281.478.9
TheoremQA6f0af812.312.914.415.620.125.518.524.121.918.922.930.4
HumanEval8e312c27.439.051.862.882.378.181.178.189.087.887.282.3
MBPPa447ff38.547.551.460.774.369.376.775.183.777.486.876.7
MBPP-CN9114d519.630.634.445.864.464.475.467.277.875.476.076.0
Overall-33.542.451.659.269.472.973.476.677.478.978.980.5
+ +Table 11: Comparison of language model performance across multiple benchmarks. These results were obtained using the OpenCompass toolkit. We compare InternVL3 with Qwen2.5 Chat models, whose corresponding pre-trained base models are employed as the initialization of the language component in InternVL3. Please note that the evaluation scores of the Qwen2.5 series may differ from those officially reported, as we have adopted the prompt versions provided in the table across all datasets for OpenCompass evaluation. + +
V2PEδTextVQA valVizWiz valChartQA test avgDocVQA valAI2D testInfoVQA valGQA testSQA-I testPOPETiny LVLMMMMU valSEED v1 imageOverall
X-78.461.781.489.481.169.460.894.487.9348.552.675.675.2
1/25678.061.781.288.581.067.761.094.488.3345.352.975.975.0
1/6478.362.081.789.481.369.660.994.788.3345.752.376.175.3
1/1678.762.181.790.481.670.461.195.088.2345.053.376.175.6
1/479.062.282.491.081.871.761.294.988.1345.852.676.275.9
1/178.761.782.290.281.771.461.294.688.5347.252.476.175.7
+ +Table 12: Performance of the pre-trained InternVL3-8B model on multimodal benchmarks with different positional encoding strategies. When employing V2PE, the impact of different positional increment values $\delta$ is systematically evaluated. + +ternVL3 series consistently demonstrates superior performance over the Qwen2.5 chat models across most evaluation benchmarks. + +This observed enhancement in language capabilities primarily arises from several factors, including the integration of approximately $25\%$ pure-language data, joint parameter optimization during native multimodal pre-training, and the extensive use of high-quality textual corpora during the subsequent post-training stage. Such an approach not only strengthens multimodal comprehension but also significantly enhances language proficiency. Consequently, even when derived from identical pre-trained base models, the integrated multimodal and pure-text training strategy employed by InternVL3 results in substantially improved performance in language capabilities compared to the specialized training pipeline designed for pure-text tasks used by the Qwen2.5 chat models. + +![](images/ec927fbd80730a141c1d6b95f58d4e1e79c5b82d743ca43baae18e32892499e3.jpg) +Figure 3: Performance comparison on multimodal benchmarks under different training strategies. Native multimodal pre-training endows MLLMs with strong multimodal capabilities, even without further post-training. + +
ModelMPOMMMUMathVistaMathVisionMathVerseDynaMathWeMathLogicVistaOverall
InternVL3-1B43.447.213.818.14.214.731.124.6
43.445.818.818.75.813.429.825.1 (+0.5)
InternVL3-2B49.159.022.023.213.418.130.030.7
48.657.021.725.314.622.436.932.4 (+1.7)
InternVL3-8B61.967.424.736.922.832.743.241.4
62.771.629.339.825.537.144.144.3 (+2.9)
InternVL3-9B59.068.828.932.223.032.546.541.6
57.771.527.635.326.733.849.243.1 (+1.5)
InternVL3-14B67.170.531.238.827.938.149.946.2
67.175.137.244.431.343.051.249.9 (+3.7)
InternVL3-38B69.371.234.245.122.241.754.448.3
70.175.134.248.235.348.658.452.8 (+4.5)
InternVL3-78B72.274.035.244.231.742.553.550.5
72.279.043.151.035.146.155.954.6 (+4.1)
+ +Table 13: Comparison of reasoning abilities before and after Mixed Preference Optimization (MPO). + +# 3.14 Ablation Study + +The Effectiveness of Native Multimodal Pre-Training. To assess the effectiveness of native multimodal pre-training, we conduct experiments on the InternVL2-8B model while keeping its architecture, initialization parameters, and training data entirely unchanged. Traditionally, InternVL2-8B employs a training pipeline that begins with an MLP warmup phase for multimodal alignment, followed by an instruction-tuning stage. In our experiments, we substitute the conventional MLP warmup phase with our native multimodal pre-training process. This modification isolates the contribution of native multimodal pre-training to the overall multimodal capability of the model. + +The evaluation results in Figure 3 show that the model with native multimodal pre-training exhibits performance on most benchmarks that is comparable to the fully multi-stage-trained InternVL2-8B baseline. Furthermore, when followed by instruction tuning on higher-quality data, the model demonstrates further performance gains across evaluated multimodal tasks. These findings underscore the efficiency of native multimodal pre-training in imparting powerful multimodal capabilities to MLLMs. + +The Evaluation of Variable Visual Position Encoding. To promote the multimodal capabilities in long-context scenarios, InternVL3 employs Variable Visual Position Encoding (V2PE) in its visual embedding. However, in the original V2PE [42], this specialized positional encoding for visual tokens did not yield benefits on + +multimodal tasks with moderate context lengths. To further explore the efficacy of V2PE in a broader setting, we incorporated it during the native multimodal pre-training stage and evaluated the InternVL3-8B pre-trained model on standard multimodal benchmarks. 
+ +As reported in Table 12, the introduction of V2PE leads to significant performance gains across most evaluation metrics. In addition, our ablation studies—by varying the positional increment $\delta$ —reveal that even for tasks primarily involving short contexts, relatively small $\delta$ values can achieve optimal performance. These findings provide important insights for future efforts aimed at refining position encoding strategies for visual tokens in MLLMs. It is important to note that, to ensure fair comparisons, all results elsewhere in this report maintain a fixed $\delta = 1$ , except for the experimental results presented in Table 12. + +Mixed Preference Optimization. Here, we demonstrate the effectiveness of MPO. As shown in Table 13, models fine-tuned with MPO demonstrate superior reasoning performance across seven multimodal reasoning benchmarks compared to their counterparts without MPO. Specifically, InternVL3-78B and InternVL3-38B outperform their counterparts by 4.1 and 4.5 points, respectively. Notably, the training data used for MPO is a subset of that used for SFT, indicating that the performance improvements primarily stem from the training algorithm rather than the training data. + +# 4 Conclusion + +We have introduced InternVL3, a significant advancement in the InternVL series that implements a native multimodal pre-training paradigm. By jointly learning linguistic and multimodal capabilities during the pretraining phase, InternVL3 avoids the training complexities and optimization challenges typically associated with post-hoc MLLM training pipelines. Through the incorporation of variable visual position encoding (V2PE) for extended multimodal contexts, advanced post-training strategies—such as supervised fine-tuning and mixed preference optimization—and test-time scaling, InternVL3 establishes a new open-source benchmark across a wide range of multimodal tasks, while simultaneously preserving robust linguistic competencies. 
Notably, InternVL3-78B attains a 72.2-point score on the MMMU benchmark, exceeding previous open-source MLLMs and reducing the performance gap relative to leading proprietary counterparts (e.g., Gemini-2.5 Pro). In line with our commitment to fostering community-driven innovation in multimodal large language models, we will publicly release InternVL3's training data and model weights, thereby encouraging further research and development in this rapidly evolving field. + +# References + +[1] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 15 +[2] Anonymous. CG-bench: Clue-grounded question answering benchmark for long video understanding. In Submitted to The Thirteenth International Conference on Learning Representations, 2024. under review. 14, 15 +[3] Anthropic. The claude 3 model family: Opus, sonnet, haiku. https://www.anthropic.com, 2024. 2, 8, 9, 10, 11, 12 +[4] Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021. 16 +[5] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. 1, 2 +[6] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 11, 12, 13, 14 +[7] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2.5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 
1, 2, 9, 10, 15 +[8] Loubna Ben Allal, Anton Lozhkov, Guilherme Penedo, Thomas Wolf, and Leandro von Werra. Smoll-m-corpus, 2024. 5 + +[9] Ali Furkan Biten, Ruben Tito, Andres Mafla, Lluis Gomez, Marçal Rusinol, Ernest Valveny, CV Jawahar, and Dimosthenis Karatzas. Scene text visual question answering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4291-4301, 2019. 6 +[10] Jie Cao and Jing Xiao. An augmented benchmark dataset for geometric question answering through dual parallel text encoding. In Proceedings of the 29th International Conference on Computational Linguistics, pages 1511-1520, 2022. 6 +[11] Shuaichen Chang, David Palzer, Jialin Li, Eric Fosler-Lussier, and Ningchuan Xiao. Mapqa: A dataset for question answering on choropleth maps. arXiv preprint arXiv:2211.08545, 2022. 6 +[12] Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal lmm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 13 +[13] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. 12 +[14] Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374, 2021. 16 +[15] Qiaoling Chen, Diandian Gu, Guoteng Wang, Xun Chen, YingTong Xiong, Ting Huang, Qinghao Hu, Xin Jin, Yonggang Wen, Tianwei Zhang, et al. Internevo: Efficient long-sequence large language model training via hybrid parallelism and redundant sharding. arXiv preprint arXiv:2401.09149, 2024. 2, 7 +[16] Qiguang Chen, Libo Qin, Jin Zhang, Zhi Chen, Xiao Xu, and Wanxiang Che. M3cot: A novel benchmark for multi-domain multi-step multi-modal chain-of-thought. 
arXiv preprint arXiv:2405.16473, 2024. 6 +[17] Wenhu Chen, Ming Yin, Max Ku, Pan Lu, Yixin Wan, Xueguang Ma, Jianyu Xu, Xinyi Wang, and Tony Xia. Theoremqa: A theorem-driven question answering dataset. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, EMNLP 2023, Singapore, December 6-10, 2023, pages 7889-7901. Association for Computational Linguistics, 2023. 16 +[18] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271, 2024. 1, 2, 3, 5, 6, 9, 10, 11, 12, 13, 14, 15 +[19] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 1, 3, 10, 11, 12, 13, 14, 15 +[20] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 2, 3 +[21] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024. 1, 2, 3 +[22] Kanzhi Cheng, Qiushi Sun, Yougang Chu, Fangzhi Xu, Yantao Li, Jianbing Zhang, and Zhiyong Wu. Seeclick: Harnessing gui grounding for advanced visual gui agents. arXiv preprint arXiv:2401.10935, 2024. 
16 +[23] Zesen Cheng, Sicong Leng, Hang Zhang, Yifei Xin, Xin Li, Guanzheng Chen, Yongxin Zhu, Wenqi Zhang, Ziyang Luo, Deli Zhao, et al. Videollama 2: Advancing spatial-temporal modeling and audio understanding in video-llms. arXiv preprint arXiv:2406.07476, 2024. 15 +[24] Christopher Clark and Matt Gardner. Simple and effective multi-paragraph reading comprehension. In Proceedings of the Annual Meeting of the Association for Computational Linguistics, pages 845–855, 2018. 6 +[25] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. 16 +[26] OpenCompass Contributors. Opencompass: A universal evaluation platform for foundation models. https://github.com/open-compass/opencompass, 2023. 9, 10, 11, 12 + 

[27] X.AI Corp. Grok-1.5 vision preview: Connecting the digital and physical worlds with our first multimodal model. https://x.ai/blog/grok-1.5v, 2024. 11 +[28] Wenliang Dai, Nayeon Lee, Boxin Wang, Zhuolin Yang, Zihan Liu, Jon Barker, Tuomas Rintamaki, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. Nvlm: Open frontier-class multimodal llms. arXiv preprint arXiv:2409.11402, 2024. 10 +[29] Google Deepmind. Gemini 2.0 is now available to everyone. https://blog.google/technology/google-deepmind/gemini-model-updates-february-2025/, 2025. 9 +[30] Google Deepmind. Introducing gemini 2.0: our new ai model for the agentic era. https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/, 2024. 9 +[31] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, et al. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024. 
1, 10 +[32] Xiaoyi Dong, Pan Zhang, Yuhang Zang, Yuhang Cao, Bin Wang, Linke Ouyang, Songyang Zhang, Haodong Duan, Wenwei Zhang, Yining Li, et al. Internlm-xcomposer2-4khd: A pioneering large vision-language model handling resolutions from 336 pixels to 4k hd. arXiv preprint arXiv:2404.06512, 2024. 1 +[33] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multi-modality models. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 11198-11201, 2024. 7 +[34] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. 10 +[35] Xinyu Fang, Kangrui Mao, Haodong Duan, Xiangyu Zhao, Yining Li, Dahua Lin, and Kai Chen. Mmbench-video: A long-form multi-shot benchmark for holistic video understanding. arXiv preprint arXiv:2406.14515, 2024. 14, 15 +[36] Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In Conference on Computer Vision and Pattern Recognition Workshop, pages 178-178, 2004. 7 +[37] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Zhenyu Qiu, Wei Lin, Jinrui Yang, Xiawu Zheng, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 12 +[38] Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 
14, 15 +[39] Xingyu Fu, Yushi Hu, Bangzheng Li, Yu Feng, Haoyu Wang, Xudong Lin, Dan Roth, Noah A Smith, Wei-Chiu Ma, and Ranjay Krishna. Blink: Multimodal large language models can see but not perceive. arXiv preprint arXiv:2404.12390, 2024. 9, 11 +[40] Jiahui Gao, Renjie Pi, Jipeng Zhang, Jiacheng Ye, Wanjun Zhong, Yufei Wang, Lanqing Hong, Jianhua Han, Hang Xu, Zhenguo Li, et al. G-llava: Solving geometric problem with multi-modal large language model. arXiv preprint arXiv:2312.11370, 2023. 6 +[41] Zhangwei Gao, Zhe Chen, Erfei Cui, Yiming Ren, Weiyun Wang, Jinguo Zhu, Hao Tian, Shenglong Ye, Junjun He, Xizhou Zhu, et al. Mini-internvl: A flexible-transfer pocket multimodal model with $5\%$ parameters and $90\%$ performance. arXiv preprint arXiv:2410.16261, 2024. 3 +[42] Junqi Ge, Ziyi Chen, Jintao Lin, Jinguo Zhu, Xihui Liu, Jifeng Dai, and Xizhou Zhu. V2pe: Improving multi-modal long-context capability of vision-language models with variable visual position encoding. arXiv preprint arXiv:2412.09616, 2024. 2, 3, 18 +[43] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6904-6913, 2017. 6 +[44] Shuhao Gu, Jialing Zhang, Siyuan Zhou, Kevin Yu, Zhaohu Xing, Liangdong Wang, Zhou Cao, Jintao Jia, Zhuoyi Zhang, Yixuan Wang, et al. Infinity-mm: Scaling multimodal performance with large-scale and high-quality instruction data. arXiv preprint arXiv:2410.18558, 2024. 9, 10 + +[45] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: An advanced diagnostic suite for entangled language hallucination & visual illusion in large vision-language models. arXiv preprint arXiv:2310.14566, 2023. 
8, 12, 13 +[46] Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. Measuring massive multitask language understanding. In The International Conference on Learning Representations, 2020. 16 +[47] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. In Joaquin Vanschoren and Sai-Kit Yeung, editors, Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual, 2021. 16 +[48] Yuzhen Huang, Yuzhuo Bai, Zhihao Zhu, Junlei Zhang, Jinghan Zhang, Tangjun Su, Junteng Liu, Chuancheng Lv, Yikai Zhang, Yao Fu, et al. C-eval: A multi-level multi-discipline chinese evaluation suite for foundation models. Advances in Neural Information Processing Systems, 36, 2024. 16 +[49] Zheng Huang, Kai Chen, Jianhua He, Xiang Bai, Dimosthenis Karatzas, Shijian Lu, and CV Jawahar. Icdar2019 competition on scanned receipt OCR and information extraction. In 2019 International Conference on Document Analysis and Recognition (ICDAR), pages 1516-1520. IEEE, 2019. 6 +[50] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6700–6709, 2019. 6 +[51] Dongfu Jiang, Xuan He, Huaye Zeng, Cong Wei, Max Ku, Qian Liu, and Wenhu Chen. Mantis: Interleaved multi-image instruction tuning. arXiv preprint arXiv:2405.01483, 2024. 9, 11 +[52] Mandar Joshi, Eunsol Choi, Daniel S Weld, and Luke Zettlemoyer. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. arXiv preprint arXiv:1705.03551, 2017. 16 +[53] Seungjae Jung, Gunsoo Han, Daniel Wontae Nam, and Kyoung-Woon On. Binary classifier optimization for large language model alignment. 
arXiv preprint arXiv:2404.04656, 2024. 6 +[54] Kushal Kafle, Brian Price, Scott Cohen, and Christopher Kanan. Dvqa: Understanding data visualizations via question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5648-5656, 2018. 6 +[55] Mehran Kazemi, Hamidreza Alvari, Ankit Anand, Jialin Wu, Xi Chen, and Radu Soricut. Geomverse: A systematic evaluation of large models for geometric reasoning. arXiv preprint arXiv:2312.12241, 2023. 6 +[56] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing, pages 787-798, 2014. 13 +[57] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In European Conference on Computer Vision, pages 235-251, 2016. 6, 7, 8, 10 +[58] Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. Natural questions: a benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453-466, 2019. 16 +[59] Guokun Lai, Qizhe Xie, Hanxiao Liu, Yiming Yang, and Eduard Hovy. Race: Large-scale reading comprehension dataset from examinations. arXiv preprint arXiv:1704.04683, 2017. 16 +[60] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 9, 10, 11, 12, 15, 16 +[61] Bohao Li, Yuying Ge, Yi Chen, Yixiao Ge, Ruimao Zhang, and Ying Shan. Seed-bench-2-plus: Benchmarking multimodal large language models with text-rich visual comprehension. 
arXiv preprint arXiv:2404.16790, 2024. 8, 10 +[62] Chunyi Li, Jianbo Zhang, Zicheng Zhang, Haoning Wu, Yuan Tian, Wei Sun, Guo Lu, Xiaohong Liu, Xiongkuo Min, Weisi Lin, et al. R-bench: Are your large multimodal model robust to real-world corruptions? arXiv preprint arXiv:2410.05474, 2024. 11 +[63] Haonan Li, Yixuan Zhang, Fajri Koto, Yifei Yang, Hai Zhao, Yeyun Gong, Nan Duan, and Timothy Baldwin. Cmmlu: Measuring massive multitask language understanding in chinese. arXiv preprint arXiv:2306.09212, 2023. 16 + 

[64] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023. 15 +[65] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024. 14, 15 +[66] Yanghao Li, Chao-Yuan Wu, Haoqi Fan, Karttikeya Mangalam, Bo Xiong, Jitendra Malik, and Christoph Feichtenhofer. Mvitv2: Improved multiscale vision transformers for classification and detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4804-4814, 2022. 1, 3 +[67] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. In The Conference on Empirical Methods in Natural Language Processing, pages 292–305, 2023. 12, 13 +[68] Zhang Li, Biao Yang, Qiang Liu, Zhiyin Ma, Shuo Zhang, Jingxu Yang, Yabo Sun, Yuliang Liu, and Xiang Bai. Monkey: Image resolution and text label are important things for large multi-modal models. arXiv preprint arXiv:2311.06607, 2023. 1 +[69] Zhiqi Li, Guo Chen, Shilong Liu, Shihao Wang, Vibashan VS, Yishen Ji, Shiyi Lan, Hao Zhang, Yilin Zhao, Subhashree Radhakrishnan, et al. 
Eagle 2: Building post-training data strategies from scratch for frontier vision-language models. arXiv preprint arXiv:2501.14818, 2025. 1 +[70] Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, 2023. 7 +[71] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pre-training for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26689-26699, 2024. 1, 9, 10, 15, 16 +[72] Adam Dahlgren Lindström and Savitha Sam Abraham. Clevr-math: A dataset for compositional language, visual and mathematical reasoning. arXiv preprint arXiv:2208.05358, 2022. 6 +[73] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in Neural Information Processing Systems, 36, 2023. 2 +[74] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Qing Jiang, Chunyuan Li, Jianwei Yang, Hang Su, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. In European Conference on Computer Vision, pages 38-55. Springer, 2025. 13 +[75] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? arXiv preprint arXiv:2307.06281, 2023. 12 +[76] Yuliang Liu, Zhang Li, Hongliang Li, Wenwen Yu, Mingxin Huang, Dezhi Peng, Mingyu Liu, Mingrui Chen, Chunyuan Li, Lianwen Jin, et al. On the hidden mystery ofOCR in large multimodal models. arXiv preprint arXiv:2305.07895, 2023. 8, 10 +[77] Zihan Liu, Yang Chen, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. Acemath: Advancing frontier math reasoning with post-training and reward modeling. arXiv preprint, 2024. 
5 +[78] Zuyan Liu, Yuhao Dong, Ziwei Liu, Winston Hu, Jiwen Lu, and Yongming Rao. Oryx mllm: On-demand spatial-temporal understanding at arbitrary resolution. arXiv preprint arXiv:2409.12961, 2024. 15 +[79] Dakuan Lu, Xiaoyu Tan, Rui Xu, Tianchu Yao, Chao Qu, Wei Chu, Yinghui Xu, and Yuan Qi. Scp-116k: A high-quality problem-solution dataset and a generalized pipeline for automated extraction in the higher education science domain, 2025. 5 +[80] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. 7, 8, 9 +[81] Pan Lu, Ran Gong, Shibiao Jiang, Liang Qiu, Siyuan Huang, Xiaodan Liang, and Song-Chun Zhu. Inter-gps: Interpretable geometry problem solving with formal language and symbolic reasoning. arXiv preprint arXiv:2105.04165, 2021. 6 +[82] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. Advances in Neural Information Processing Systems, 35:2507-2521, 2022. 6 + +[83] Pan Lu, Liang Qiu, Jiaqi Chen, Tony Xia, Yizhou Zhao, Wei Zhang, Zhou Yu, Xiaodan Liang, and Song-Chun Zhu. Iconqa: A new benchmark for abstract diagram understanding and visual language reasoning. arXiv preprint arXiv:2110.13214, 2021.6 +[84] Shiyin Lu, Yang Li, Qing-Guo Chen, Zhao Xu, Weihua Luo, Kaifu Zhang, and Han-Jia Ye. Ovis: Structural embedding alignment for multimodal large language model. arXiv preprint arXiv:2405.20797, 2024. 9, 10 +[85] Xudong Lu, Yinghao Chen, Cheng Chen, Hui Tan, Boheng Chen, Yina Xie, Rui Hu, Guanxin Tan, Renshou Wu, Yan Hu, et al. Bluelm-v-3b: Algorithm and system co-design for multimodal large language models on mobile devices. arXiv preprint arXiv:2411.10640, 2024. 
1 +[86] Yujie Lu, Dongfu Jiang, Wenhu Chen, William Yang Wang, Yejin Choi, and Bill Yuchen Lin. Wildvision: Evaluating vision-language models in the wild with human preferences. arXiv preprint arXiv:2406.11069, 2024. 11 +[87] Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592, 2, 2024. 7 +[88] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11–20, 2016. 13 +[89] Andrés Marafioti, Orr Zohar, Miquel Farré, Merve Noyan, Elie Bakouch, Pedro Cuenca, Cyril Zakka, Loubna Ben Allal, Anton Lozhkov, Nouamane Tazi, et al. Smolvlm: Redefining small and efficient multimodal models. arXiv preprint arXiv:2504.05299, 2025. 1 +[90] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3195-3204, 2019. 6 +[91] Ahmed Masry, Xuan Long Do, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. In Proceedings of the Annual Meeting of the Association for Computational Linguistics, pages 2263-2279, 2022. 6, 7, 8, 10 +[92] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and CV Jawahar. Infographicvqa. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1697-1706, 2022. 6, 7, 8, 10 +[93] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. 
In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2200–2209, 2021. 7, 8, 10 +[94] Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. arXiv preprint arXiv:2407.00215, 2024. 7 +[95] Fanqing Meng, Jin Wang, Chuanhao Li, Quanfeng Lu, Hao Tian, Jiaqi Liao, Xizhou Zhu, Jifeng Dai, Yu Qiao, Ping Luo, et al. Mmiu: Multimodal multi-image understanding for evaluating large vision-language models. arXiv preprint arXiv:2408.02718, 2024. 9, 11 +[96] Anand Mishra, Shashank Shekhar, Ajeet Kumar Singh, and Anirban Chakraborty. Ocr-vqa: Visual question answering by reading text in images. In International Conference on Document Analysis and Recognition, pages 947-952, 2019. 6 +[97] OpenAI. Gpt-4v(ison) system card. https://cdn.openai.com/papers/GPTV_System/Card.pdf, 2023.1,8,9,10,11,12,14,15,16 +[98] OpenAI. Gpt-4o system card. https://openai.com/index/gpt-4o-system-card/, 2025.2,8 +[99] Runqi Qiao, Qiuna Tan, Guanting Dong, Minhui Wu, Chong Sun, Xiaoshuai Song, Zhuoma GongQue, Shanglin Lei, Zhe Wei, Miaoxuan Zhang, et al. We-math: Does your large multimodal model achieve human-like mathematical reasoning? arXiv preprint arXiv:2407.01284, 2024. 8, 9 +[100] Yujia Qin, Yining Ye, Junjie Fang, Haoming Wang, Shihao Liang, Shizuo Tian, Junda Zhang, Jiahao Li, Yunxin Li, Shijue Huang, et al. Ui-tars: Pioneering automated gui interaction with native agents. arXiv preprint arXiv:2501.12326, 2025. 16 +[101] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36, 2024. 6 + +[102] Machel Reid, Nikolay Savinov, Denis Teplyashin, Dmitry Lepikhin, Timothy Lillicrap, Jean-baptiste Alayrac, Radu Soricut, Angeliki Lazaridou, Orhan Firat, Julian Schrittwieser, et al. 
Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024. 10, 11, 12, 15, 16 +[103] Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. Winogrande: An adversarial winograd schema challenge at scale. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 8732-8740, 2020. 16 +[104] Minjoon Seo, Hannaneh Hajishirzi, Ali Farhadi, Oren Etzioni, and Clint Malcolm. Solving geometry problems: Combining text and diagram interpretation. In Proceedings of the 2015 conference on empirical methods in natural language processing, pages 1466-1476, 2015. 6 +[105] Min Shi, Fuxiao Liu, Shihao Wang, Shijia Liao, Subhashree Radhakrishnan, De-An Huang, Hongxu Yin, Karan Sapra, Yaser Yacoob, Humphrey Shi, et al. Eagle: Exploring the design space for multimodal llms with mixture of encoders. arXiv preprint arXiv:2408.15998, 2024. 1 +[106] Wenhao Shi, Zhiqiang Hu, Yi Bin, Junhua Liu, Yang Yang, See-Kiong Ng, Lidong Bing, and Roy Ka-Wei Lee. Math-llava: Bootstrapping mathematical reasoning for multimodal large language models. arXiv preprint arXiv:2406.17294, 2024. 6 +[107] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8317-8326, 2019. 6, 8, 10 +[108] Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024. 7 +[109] Hai-Long Sun, Da-Wei Zhou, Yang Li, Shiyin Lu, Chao Yi, Qing-Guo Chen, Zhao Xu, Weihua Luo, Kaifu Zhang, De-Chuan Zhan, et al. Parrot: Multilingual visual instruction tuning. arXiv preprint arXiv:2406.02539, 2024. 14 +[110] Kai Sun, Dian Yu, Dong Yu, and Claire Cardie. 
Investigating prior knowledge for challenging Chinese machine reading comprehension. Transactions of the Association for Computational Linguistics, 8:141-155, 2020. 16 +[111] Zhiqing Sun, Sheng Shen, Shengcao Cao, Haotian Liu, Chunyuan Li, Yikang Shen, Chuang Gan, Liang-Yan Gui, Yu-Xiong Wang, Yiming Yang, et al. Aligning large multimodal models with factually augmented rlhf. arXiv preprint arXiv:2309.14525, 2023. 12, 13 +[112] Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261, 2022. 16 +[113] Jingqun Tang, Qi Liu, Yongjie Ye, Jinghui Lu, Shu Wei, Chunhui Lin, Wanqing Li, Mohamad Fitri Faiz Bin Mahmood, Hao Feng, Zhen Zhao, et al. Mtvqa: Benchmarking multilingual text-centric visual question answering. arXiv preprint arXiv:2405.11985, 2024. 14 +[114] Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1, 14 +[115] Qwen Team. Qvq: To see the world with wisdom, December 2024. 9 +[116] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024. 9, 10, 11, 12 +[117] v DeepMind. Gemini 2.5 pro. https://deepmind.google/technologies/gemini/pro/, 2025. 1, 2, 8 +[118] Fei Wang, Xingyu Fu, James Y Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou, Kai Zhang, et al. Muirbench: A comprehensive benchmark for robust multi-image understanding. arXiv preprint arXiv:2406.09411, 2024. 
9, 11 +[119] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. arXiv preprint arXiv:2402.14804, 2024. 8, 9 +[120] Peiyi Wang, Lei Li, Zhihong Shao, RX Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce lms step-by-step without human annotations. arXiv preprint arXiv:2312.08935, 2023. 7 +[121] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 1, 8, 10, 11, 12, 13, 14, 15 + +[122] Peng Wang, Shijie Wang, Junyang Lin, Shuai Bai, Xiaohuan Zhou, Jingren Zhou, Xinggang Wang, and Chang Zhou. One-peace: Exploring one general representation model toward unlimited modalities. arXiv:2305.11172, 2023. 13 +[123] Weihan Wang, Qingsong Lv, Wenmeng Yu, Wenyi Hong, Ji Qi, Yan Wang, Junhui Ji, Zhuoyi Yang, Lei Zhao, Xixuan Song, et al. Cogvlm: Visual expert for pretrained language models. arXiv preprint arXiv:2311.03079, 2023. 1, 13 +[124] Weiyun Wang, Zhe Chen, Wenhai Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Jinguo Zhu, Xizhou Zhu, Lewei Lu, Yu Qiao, and Jifeng Dai. Enhancing the reasoning ability of multimodal large language models via mixed preference optimization. arXiv preprint arXiv:2411.10442, 2024. 2, 6, 7 +[125] Weiyun Wang, Zhangwei Gao, Lianjie Chen, Zhe Chen, Jinguo Zhu, Xiangyu Zhao, Yangzhou Liu, Yue Cao, Shenglong Ye, Xizhou Zhu, et al. Visualprm: An effective process reward model for multimodal reasoning. arXiv preprint arXiv:2503.10291, 2025. 2, 7, 8, 9 +[126] Weiyun Wang, Yiming Ren, Haowen Luo, Tiantong Li, Chenxiang Yan, Zhe Chen, Wenhai Wang, Qingyun Li, Lewei Lu, Xizhou Zhu, et al. The all-seeing project v2: Towards general relation comprehension of the open world. arXiv preprint arXiv:2402.19474, 2024. 
6, 12, 13 +[127] Weiyun Wang, Min Shi, Qingyun Li, Wenhai Wang, Zhenhang Huang, Linjie Xing, Zhe Chen, Hao Li, Xizhou Zhu, Zhiguo Cao, et al. The all-seeing project: Towards panoptic visual recognition and understanding of the open world. In The International Conference on Learning Representations, 2024. 6 +[128] Zirui Wang, Mengzhou Xia, Luxi He, Howard Chen, Yitao Liu, Richard Zhu, Kaiqu Liang, Xindi Wu, Haotian Liu, Sadhika Malladi, et al. Charxiv: Charting gaps in realistic chart understanding in multimodal llms. arXiv preprint arXiv:2406.18521, 2024. 8, 10 +[129] Haoning Wu, Dongxu Li, Bei Chen, and Junnan Li. Longvideobench: A benchmark for long-context interleaved video-language understanding. arXiv preprint arXiv:2407.15754, 2024. 8, 14, 15 +[130] Zhiyong Wu, Zhenyu Wu, Fangzhi Xu, Yian Wang, Qiushi Sun, Chengyou Jia, Kanzhi Cheng, Zichen Ding, Liheng Chen, Paul Pu Liang, et al. Os-atlas: A foundation action model for generalist gui agents. arXiv preprint arXiv:2410.23218, 2024. 16 +[131] Yijia Xiao, Edward Sun, Tianyu Liu, and Wei Wang. Logicvista: Multimodal llm logical reasoning benchmark in visual contexts. arXiv preprint arXiv:2407.04973, 2024. 8, 9 +[132] Yiheng Xu, Zekun Wang, Junli Wang, Dunjie Lu, Tianbao Xie, Amrita Saha, Doyen Sahoo, Tao Yu, and Caiming Xiong. Aguvis: Unified pure vision agents for autonomous gui interaction. 2024. 16 +[133] B. Yan, Yi Jiang, Jiannan Wu, D. Wang, Ping Luo, Zehuan Yuan, and Hutchuan Lu. Universal instance perception as object discovery and retrieval. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 13 +[134] Jihan Yang, Shusheng Yang, Anjali Gupta, Rilyn Han, Li Fei-Fei, and Saining Xie. Thinking in Space: How Multimodal Large Language Models See, Remember and Recall Spaces. arXiv preprint arXiv:2412.14171, 2024. 16 +[135] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. 
Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 9, 10, 11, 12, 15 +[136] Qinghao Ye, Haiyang Xu, Jiabo Ye, Ming Yan, Haowei Liu, Qi Qian, Ji Zhang, Fei Huang, and Jingren Zhou. mplug-owl2: Revolutionizing multi-modal large language model with modality collaboration. arXiv preprint arXiv:2311.04257, 2023. 1, 14 +[137] Kaining Ying, Fanqing Meng, Jin Wang, Zhiqian Li, Han Lin, Yue Yang, Hao Zhang, Wenbo Zhang, Yuqi Lin, Shuo Liu, Jiayi Lei, Quanfeng Lu, Runjian Chen, Peng Xu, Renrui Zhang, Haozhe Zhang, Peng Gao, Yali Wang, Yu Qiao, Ping Luo, Kaipeng Zhang, and Wenqi Shao. Mmt-bench: A comprehensive multimodal benchmark for evaluating large vision-language models towards multitask agi. arXiv preprint arXiv:2404.16006, 2024. 9, 11 +[138] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. 12 +[139] Weihao Yu, Zhengyuan Yang, Linfeng Ren, Linjie Li, Jianfeng Wang, Kevin Lin, Chung-Ching Lin, Zicheng Liu, Lijuan Wang, and Xinchao Wang. Mm-vet2: A challenging benchmark to evaluate large multimodal models for integrated capabilities. arXiv preprint arXiv:2408.00765, 2024. 12 +[140] Ya-Qi Yu, Minghui Liao, Jiwen Zhang, and Jihao Wu. Texthawk2: A large vision-language model excels in bilingualOCR and grounding with 16x fewer tokens. arXiv preprint arXiv:2410.05261, 2024. 13 + +[141] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. arXiv preprint arXiv:2311.16502, 2023. 2, 7, 8, 9 +[142] Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. Hellaswag: Can a machine really finish your sentence? 
In Proceedings of the Annual Meeting of the Association for Computational Linguistics, pages 4791-4800, 2019. 16 +[143] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Dufter, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1.5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 13 +[144] Haotian Zhang, Haoxuan You, Philipp Dufter, Bowen Zhang, Chen Chen, Hong-You Chen, Tsu-Jui Fu, William Yang Wang, Shih-Fu Chang, Zhe Gan, et al. Ferret-v2: An improved baseline for referring and grounding with large language models. arXiv preprint arXiv:2404.07973, 2024. 13 +[145] Peiyuan Zhang, Kaichen Zhang, Bo Li, Guangtao Zeng, Jingkang Yang, Yuanhan Zhang, Ziyue Wang, Haoran Tan, Chunyuan Li, and Ziwei Liu. Long context transfer from language to vision. arXiv preprint arXiv:2406.16852, 2024. 16 +[146] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Peng Gao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? arXiv preprint arXiv:2403.14624, 2024.8, 9 +[147] Renrui Zhang, Xinyu Wei, Dongzhi Jiang, Yichi Zhang, Ziyu Guo, Chengzhuo Tong, Jiaming Liu, Aojun Zhou, Bin Wei, Shanghang Zhang, et al. Mavis: Mathematical visual instruction tuning. arXiv preprint arXiv:2407.08739, 2024.6 +[148] Tianyu Zhang, Suyuchen Wang, Lu Li, Ge Zhang, Perouz Taslakian, Sai Rajeswar, Jie Fu, Bang Liu, and Yoshua Bengio. Vcr: Visual caption restoration. arXiv preprint arXiv:2406.06462, 2024. 8, 10 +[149] Xiaotian Zhang, Chunyang Li, Yi Zong, Zhengyu Ying, Liang He, and Xipeng Qiu. Evaluating the performance of large language models on gaokao benchmark. arXiv preprint arXiv:2305.12474, 2023. 16 +[150] Y Zhang, B Li, H Liu, Y Lee, L Gui, D Fu, J Feng, Z Liu, and C Li. Llava next: A strong zero-shot video understanding model. 2024. 
16 +[151] Yi-Fan Zhang, Huanyu Zhang, Haochen Tian, Chaoyou Fu, Shuangqing Zhang, Junfei Wu, Feng Li, Kun Wang, Qingsong Wen, Zhang Zhang, et al. Mme-realworld: Could your multimodal llm challenge high-resolution real-world scenarios that are difficult for humans? arXiv preprint arXiv:2408.13257, 2024. 11 +[152] Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning. arXiv preprint arXiv:2501.07301, 2025. 7 +[153] Bingchen Zhao, Yongshuo Zong, Letian Zhang, and Timothy Hospedales. Benchmarking multi-image understanding in vision and language models: Perception, knowledge, reasoning, and multi-hop reasoning. arXiv preprint arXiv:2406.12742, 2024. 9, 11 +[154] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 14, 15 +[155] Chengke Zou, Xingang Guo, Rui Yang, Junyu Zhang, Bin Hu, and Huan Zhang. Dynamath: A dynamic visual benchmark for evaluating mathematical reasoning robustness of vision language models. arXiv preprint arXiv:2411.00836, 2024. 
8, 9 \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10479/images/161d8384d9e1a45f1bfa6d5a124a923c74d772f4c9ab302fb98daff5df6aad86.jpg b/data/2025/2504_10xxx/2504.10479/images/161d8384d9e1a45f1bfa6d5a124a923c74d772f4c9ab302fb98daff5df6aad86.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef28cd899b67a3adcad8f8f20102adba7ff4c688 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/161d8384d9e1a45f1bfa6d5a124a923c74d772f4c9ab302fb98daff5df6aad86.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71badf3fdfc19b713742cdafcc28bb0130c1ba3a2f66f725ff4d1bd9c5e01e05 +size 106114 diff --git a/data/2025/2504_10xxx/2504.10479/images/2543074654573cacd7d214bda38adddfa3eca6683b87e7e4c38e54f1a78f3548.jpg b/data/2025/2504_10xxx/2504.10479/images/2543074654573cacd7d214bda38adddfa3eca6683b87e7e4c38e54f1a78f3548.jpg new file mode 100644 index 0000000000000000000000000000000000000000..38bf6fc8c1cc782dde08b95896f4050756c3c6b9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/2543074654573cacd7d214bda38adddfa3eca6683b87e7e4c38e54f1a78f3548.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8a290dede09ddfa6fa8c2ff8e6c02255d07619d16a7dd7638a64e4a0c9410ce +size 110604 diff --git a/data/2025/2504_10xxx/2504.10479/images/26106948eeedbc5699b023a9f15b87fe04a4a87aaecf79a11bf90a130bc8f305.jpg b/data/2025/2504_10xxx/2504.10479/images/26106948eeedbc5699b023a9f15b87fe04a4a87aaecf79a11bf90a130bc8f305.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b77c8c488efda97f8990257b073257737c2c1053 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/26106948eeedbc5699b023a9f15b87fe04a4a87aaecf79a11bf90a130bc8f305.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72cc786c2e3884904b5fa3cc2a9b57d147b772c0fa0a8ebb8bf66d1a6423e2a1 +size 8498 diff --git 
a/data/2025/2504_10xxx/2504.10479/images/2bff5625c2428d27d5a5ebbba8c54ea1547acd55e9e07eca1d80f9348b3b9394.jpg b/data/2025/2504_10xxx/2504.10479/images/2bff5625c2428d27d5a5ebbba8c54ea1547acd55e9e07eca1d80f9348b3b9394.jpg new file mode 100644 index 0000000000000000000000000000000000000000..012916c53c0cd0c898fcd5f911902bf3a80d4334 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/2bff5625c2428d27d5a5ebbba8c54ea1547acd55e9e07eca1d80f9348b3b9394.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a80fe23c3d0d0fdaab7d9420e2567bf94e4721ebaf1dd9bb17afa5a91a1712b +size 7206 diff --git a/data/2025/2504_10xxx/2504.10479/images/38f44bcbaf1d9cf8b391869abd3803e6dde210f593578e499d64e4125c709616.jpg b/data/2025/2504_10xxx/2504.10479/images/38f44bcbaf1d9cf8b391869abd3803e6dde210f593578e499d64e4125c709616.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4d344fe8a7f085146bcdb70457626d797b1b59a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/38f44bcbaf1d9cf8b391869abd3803e6dde210f593578e499d64e4125c709616.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc40f31082c83d0bb61a294b3c01e6d09194dfa2a75b7e465b385892c9e83115 +size 245355 diff --git a/data/2025/2504_10xxx/2504.10479/images/4176fc7555918689b16ab5d68173c5f141920017a405d4eef3a48bf0f90258c7.jpg b/data/2025/2504_10xxx/2504.10479/images/4176fc7555918689b16ab5d68173c5f141920017a405d4eef3a48bf0f90258c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9c42afa2b862d992cbf3fab259a59f51d905038c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/4176fc7555918689b16ab5d68173c5f141920017a405d4eef3a48bf0f90258c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62a8450d26032985c75c95aaee410b58dfac3f68feb7e2a7e00cf461b4cf6d65 +size 159547 diff --git a/data/2025/2504_10xxx/2504.10479/images/4b335ff589f54e65900f04f73a3dd478322dcb3034ef6ea2f9bcd3140fa3f198.jpg 
b/data/2025/2504_10xxx/2504.10479/images/4b335ff589f54e65900f04f73a3dd478322dcb3034ef6ea2f9bcd3140fa3f198.jpg new file mode 100644 index 0000000000000000000000000000000000000000..55c72c1568d9d094105766f42f779908be4600da --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/4b335ff589f54e65900f04f73a3dd478322dcb3034ef6ea2f9bcd3140fa3f198.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bc9525b7b6c93ea258c8fc9066fbb60a8a1326df897c0f9b9d216a5d8739cd0 +size 7215 diff --git a/data/2025/2504_10xxx/2504.10479/images/4dc344ceba0082d124ada389f229dd7aa3fbe789254ac6aab91cf6b16f3c9dcf.jpg b/data/2025/2504_10xxx/2504.10479/images/4dc344ceba0082d124ada389f229dd7aa3fbe789254ac6aab91cf6b16f3c9dcf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7d979499d457399cadb54b02113bcf901074c70 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/4dc344ceba0082d124ada389f229dd7aa3fbe789254ac6aab91cf6b16f3c9dcf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61dfcb2b961ccd8c81dfc61f6a21e0b90b70c50f842e5270921afccbb442f416 +size 250501 diff --git a/data/2025/2504_10xxx/2504.10479/images/63b137b26d089da6496b7caf50f6a13a7945a49735feef9c70d2644edfc567f2.jpg b/data/2025/2504_10xxx/2504.10479/images/63b137b26d089da6496b7caf50f6a13a7945a49735feef9c70d2644edfc567f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a12afdef56ea6c1e949f19d3fe3072c672c04cb8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/63b137b26d089da6496b7caf50f6a13a7945a49735feef9c70d2644edfc567f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e539d21d12d7fb0aba54bfff51b9526af7da5292cffd66c1ba07e6d700f45554 +size 207281 diff --git a/data/2025/2504_10xxx/2504.10479/images/64d16e3afefe4dacafd737d1aa038b03d98fb9ed727577c0deda1cb366e1d542.jpg b/data/2025/2504_10xxx/2504.10479/images/64d16e3afefe4dacafd737d1aa038b03d98fb9ed727577c0deda1cb366e1d542.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..6b211727005c0af724d3de6f2379cdda71f36ac5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/64d16e3afefe4dacafd737d1aa038b03d98fb9ed727577c0deda1cb366e1d542.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7a39474432b808f4050f0815deb7f158a4ca47acf63be96555ecce58448dafc +size 79353 diff --git a/data/2025/2504_10xxx/2504.10479/images/67b065acbd523b3220d6a82f9de75ef6a8a324eb41a9f7e7145e4a1e352b0451.jpg b/data/2025/2504_10xxx/2504.10479/images/67b065acbd523b3220d6a82f9de75ef6a8a324eb41a9f7e7145e4a1e352b0451.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c25720a434e1c416a390e45983fb5ab5da0d950 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/67b065acbd523b3220d6a82f9de75ef6a8a324eb41a9f7e7145e4a1e352b0451.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:692b4d81aa783dba1d1689d631381ef6f66afd8a35a494fc5ff575f8f4254673 +size 8556 diff --git a/data/2025/2504_10xxx/2504.10479/images/69dc7e20d347fac7561af47567c628a75bc55d51a0710f5aaf010d6f03495c58.jpg b/data/2025/2504_10xxx/2504.10479/images/69dc7e20d347fac7561af47567c628a75bc55d51a0710f5aaf010d6f03495c58.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b6f112fba3fc4a46c59b9a8451c7081fe3aa2c1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/69dc7e20d347fac7561af47567c628a75bc55d51a0710f5aaf010d6f03495c58.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17687b2d8d068a3d6a255071ab2d0d7d71367d785f230810f8e6a50201176b61 +size 6459 diff --git a/data/2025/2504_10xxx/2504.10479/images/6bc83e896e053550faf383149197753b306b3c76e1820e010125d7ed48a15de9.jpg b/data/2025/2504_10xxx/2504.10479/images/6bc83e896e053550faf383149197753b306b3c76e1820e010125d7ed48a15de9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..555063c5ccb4384db8d95ebe40cbe7ea01a198ed --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10479/images/6bc83e896e053550faf383149197753b306b3c76e1820e010125d7ed48a15de9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e487de78835ab361356ca51ec054ca9d7e953e0b5fd60aba359521181c5833a +size 58184 diff --git a/data/2025/2504_10xxx/2504.10479/images/6d60b392f37102e7852baf467313d21be5a78ccf54c1cab110baa1eacd2d5320.jpg b/data/2025/2504_10xxx/2504.10479/images/6d60b392f37102e7852baf467313d21be5a78ccf54c1cab110baa1eacd2d5320.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6c2307bb004929c2e428ec3f155866c42658a8fa --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/6d60b392f37102e7852baf467313d21be5a78ccf54c1cab110baa1eacd2d5320.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f79d0da11a883809b0be6f2bd8a5c63862a18fc4112b8dcf36c2f7630c1c495 +size 6646 diff --git a/data/2025/2504_10xxx/2504.10479/images/6dc2a72e211edaa712e7c3f6af8295472eb9751fb7d841b2f4c7f75b232c901e.jpg b/data/2025/2504_10xxx/2504.10479/images/6dc2a72e211edaa712e7c3f6af8295472eb9751fb7d841b2f4c7f75b232c901e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b247e22cc63fea976ae72eea8759ab74e0759cc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/6dc2a72e211edaa712e7c3f6af8295472eb9751fb7d841b2f4c7f75b232c901e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0fd957741193d6688f600991eb7318647335b3fd3694ba97a5e34992712bb41 +size 8999 diff --git a/data/2025/2504_10xxx/2504.10479/images/72726958353c8d32ccc1a02d8065ac4177a8f911d8dbe5434805ed363de54cb0.jpg b/data/2025/2504_10xxx/2504.10479/images/72726958353c8d32ccc1a02d8065ac4177a8f911d8dbe5434805ed363de54cb0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1169a40e07a973d37a5f90a78da4625c31c8bc0b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/72726958353c8d32ccc1a02d8065ac4177a8f911d8dbe5434805ed363de54cb0.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:f84d698c6629042fb9a7087154b9c635bef24807525707a58582c4dbcd76d1cf +size 7199 diff --git a/data/2025/2504_10xxx/2504.10479/images/75d2c78becd00bea4f6c06cca69425a7971e2b6d3a0e905b5929f558bbfb943b.jpg b/data/2025/2504_10xxx/2504.10479/images/75d2c78becd00bea4f6c06cca69425a7971e2b6d3a0e905b5929f558bbfb943b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..81f59c9cdb2cce78203fd804213e7020e4ea4f93 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/75d2c78becd00bea4f6c06cca69425a7971e2b6d3a0e905b5929f558bbfb943b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd681c015cb718525a663f0c6ce407788368c1adbc82c206aaf3af4713fbbd1f +size 5792 diff --git a/data/2025/2504_10xxx/2504.10479/images/7c717a81f7447af860e1053745eb32f2b52b80bae0d116059fd98f31dcf80959.jpg b/data/2025/2504_10xxx/2504.10479/images/7c717a81f7447af860e1053745eb32f2b52b80bae0d116059fd98f31dcf80959.jpg new file mode 100644 index 0000000000000000000000000000000000000000..33ed270a436899fdf41b18dfa966126b087b380e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/7c717a81f7447af860e1053745eb32f2b52b80bae0d116059fd98f31dcf80959.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18579b6dc5e472e4f4adbd6178ac15dfd5c22d43b1b0661b94241eb454de42c5 +size 3826 diff --git a/data/2025/2504_10xxx/2504.10479/images/7dfb9f75ca6b528ec382461cc9c8557cfaeffdfdd09cb987fa67c8df23797837.jpg b/data/2025/2504_10xxx/2504.10479/images/7dfb9f75ca6b528ec382461cc9c8557cfaeffdfdd09cb987fa67c8df23797837.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c287361db8f4c40fba104108b6fc75c6f13f6c5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/7dfb9f75ca6b528ec382461cc9c8557cfaeffdfdd09cb987fa67c8df23797837.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee8ab8bae3bf186eccb7e13c232f388ac1e96ac24b2585304938c284b27ec47b +size 238778 diff --git 
a/data/2025/2504_10xxx/2504.10479/images/82fc116de0b0bf8c97c7d043c9af4f27e7a29697d82db4c93020c1d15f127367.jpg b/data/2025/2504_10xxx/2504.10479/images/82fc116de0b0bf8c97c7d043c9af4f27e7a29697d82db4c93020c1d15f127367.jpg new file mode 100644 index 0000000000000000000000000000000000000000..55f64e5982c5c9802c8470bf65463e07ccf08b82 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/82fc116de0b0bf8c97c7d043c9af4f27e7a29697d82db4c93020c1d15f127367.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e920fc7c3c179e9ea53d5e6198a24c9d65ee17f4e6a00145140b225c6735a35 +size 229555 diff --git a/data/2025/2504_10xxx/2504.10479/images/9263bfa8ce115702226a96743252c258021d6cf36e1e18edfe8bbaaebc0a2882.jpg b/data/2025/2504_10xxx/2504.10479/images/9263bfa8ce115702226a96743252c258021d6cf36e1e18edfe8bbaaebc0a2882.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d41a2b92678c880bc1686661b6d4b09c40c2534d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/9263bfa8ce115702226a96743252c258021d6cf36e1e18edfe8bbaaebc0a2882.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63e329a10ffae314e4738c4b3bbe0bd974a7e95baf91097958b226187e6d4c23 +size 97124 diff --git a/data/2025/2504_10xxx/2504.10479/images/99fb5780bbc77cb5290472a8cf40013dc29034a545624543f2d14aef20cdbdeb.jpg b/data/2025/2504_10xxx/2504.10479/images/99fb5780bbc77cb5290472a8cf40013dc29034a545624543f2d14aef20cdbdeb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cf4f3f50eeafbea685e48c196b89b48f0b0abd7b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/99fb5780bbc77cb5290472a8cf40013dc29034a545624543f2d14aef20cdbdeb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2388dd724ed49477eb308b1923afa64e04e80b837f1781e4ac8c3aa536a7c10 +size 3414 diff --git a/data/2025/2504_10xxx/2504.10479/images/a882683b95d10eac481fb7abb163436df7ccda6ca735de98d6fb0212917f3480.jpg 
b/data/2025/2504_10xxx/2504.10479/images/a882683b95d10eac481fb7abb163436df7ccda6ca735de98d6fb0212917f3480.jpg new file mode 100644 index 0000000000000000000000000000000000000000..170e714e130abee77b33ab53fb41f8a7668bb48e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/a882683b95d10eac481fb7abb163436df7ccda6ca735de98d6fb0212917f3480.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b595c32e7d6f10ecf6bf270c923d2b20393eea1d3f67ffd94a6b747b0af60f36 +size 65198 diff --git a/data/2025/2504_10xxx/2504.10479/images/c59cf6f8b14dce70a608cfda365f8d999fdb86411a13c8775b6106a10a119b34.jpg b/data/2025/2504_10xxx/2504.10479/images/c59cf6f8b14dce70a608cfda365f8d999fdb86411a13c8775b6106a10a119b34.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f3976856e138ffe8ad45f622bf4978013222f78 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/c59cf6f8b14dce70a608cfda365f8d999fdb86411a13c8775b6106a10a119b34.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef63b1517687613120144df55b71e5b200ecbb94d76e4032252f78fa862d949f +size 309361 diff --git a/data/2025/2504_10xxx/2504.10479/images/e2e35c7fd7d71c77b1871156fb34e9db2011cc2fa2a0bf40b9c3f1cf96176906.jpg b/data/2025/2504_10xxx/2504.10479/images/e2e35c7fd7d71c77b1871156fb34e9db2011cc2fa2a0bf40b9c3f1cf96176906.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e1b6f3c71b5c30e5e8204caa387e5369ea5b461 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/e2e35c7fd7d71c77b1871156fb34e9db2011cc2fa2a0bf40b9c3f1cf96176906.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90adb5f5ca5093942328fc8041ff15b457dd864463bf381af4cea4b6671125d7 +size 4077 diff --git a/data/2025/2504_10xxx/2504.10479/images/e5d9a7d6e77380d50286b1398b5cb57b706a8a9a17883f62629000da5c604ef9.jpg b/data/2025/2504_10xxx/2504.10479/images/e5d9a7d6e77380d50286b1398b5cb57b706a8a9a17883f62629000da5c604ef9.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..a2e1b70f170825caeeb5cfcaac1b47efd427ed75 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/e5d9a7d6e77380d50286b1398b5cb57b706a8a9a17883f62629000da5c604ef9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28e278ea2fb93802838345e6e0896efee129f10ec049b0ca3ba40758dc503fdd +size 27846 diff --git a/data/2025/2504_10xxx/2504.10479/images/e9f1b1a5e5595c43873b6029f623d54746bdb26df212ceb45762e15ae5a818cd.jpg b/data/2025/2504_10xxx/2504.10479/images/e9f1b1a5e5595c43873b6029f623d54746bdb26df212ceb45762e15ae5a818cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b08032147631a469cadfbe054b424fa9a1e936c1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/e9f1b1a5e5595c43873b6029f623d54746bdb26df212ceb45762e15ae5a818cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95dfce16f5b21184ca397a0b3c152d542277867ed69841b9fa421a13db2163bc +size 11494 diff --git a/data/2025/2504_10xxx/2504.10479/images/ec927fbd80730a141c1d6b95f58d4e1e79c5b82d743ca43baae18e32892499e3.jpg b/data/2025/2504_10xxx/2504.10479/images/ec927fbd80730a141c1d6b95f58d4e1e79c5b82d743ca43baae18e32892499e3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba6885ceaffb88afcede999c75ef5b637bd2e4b9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/ec927fbd80730a141c1d6b95f58d4e1e79c5b82d743ca43baae18e32892499e3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a77bf41a196a0fe96898822d3a9e77140928851f314d46610475a7bd19d648c1 +size 85395 diff --git a/data/2025/2504_10xxx/2504.10479/images/f5286d02710ce84555dfeebe50c2c99b828fdae3c8f2d0f77f6a37f2af63c8b8.jpg b/data/2025/2504_10xxx/2504.10479/images/f5286d02710ce84555dfeebe50c2c99b828fdae3c8f2d0f77f6a37f2af63c8b8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..162324db3c3a7f1576547f07006c6fa62c6b549d --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10479/images/f5286d02710ce84555dfeebe50c2c99b828fdae3c8f2d0f77f6a37f2af63c8b8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39eda451d97244453011343bbad9effe01246788fd96970d0ec370b0692fb030 +size 172755 diff --git a/data/2025/2504_10xxx/2504.10479/images/fc1ade31da6480ea486fb3150830af1b4f921e178c54bb077cec9b471f43e941.jpg b/data/2025/2504_10xxx/2504.10479/images/fc1ade31da6480ea486fb3150830af1b4f921e178c54bb077cec9b471f43e941.jpg new file mode 100644 index 0000000000000000000000000000000000000000..326053826bfedf07af431c23be071bb43f2ff437 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/images/fc1ade31da6480ea486fb3150830af1b4f921e178c54bb077cec9b471f43e941.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76bdc194969ba551e3abf825eb20c6a79bbabceccce7757ef3192c5e54371b52 +size 3024 diff --git a/data/2025/2504_10xxx/2504.10479/layout.json b/data/2025/2504_10xxx/2504.10479/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..901dec0e6d6a174288ce99604a853f103fb96fda --- /dev/null +++ b/data/2025/2504_10xxx/2504.10479/layout.json @@ -0,0 +1,14499 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 94, + 97, + 516, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 97, + 516, + 137 + ], + "spans": [ + { + "bbox": [ + 94, + 97, + 516, + 137 + ], + "type": "text", + "content": "InternVL3: Exploring Advanced Training and Test-Time Recipes for Open-Source Multimodal Models" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "spans": [ + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": "Jinguo Zhu" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + 
"content": ", Weiyun Wang" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{5,1*†}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Zhe Chen" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{4,1*†}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Zhaoyang Liu" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1*†}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Shenglong Ye" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Lixin Gu" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Hao Tian" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Yuchen Duan" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{6,1*†}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Weijie Su" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Jie Shao" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{4,1†}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Zhangwei Gao" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{7,1†}" + }, + { + "bbox": [ + 102, + 178, + 
511, + 257 + ], + "type": "text", + "content": ", Erfei Cui" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{7,1†}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Xuehui Wang" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{7,1†}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Yue Cao" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{4,1†}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Yangzhou Liu" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{4,1†}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Xingguang Wei" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1†}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Hongjie Zhang" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Haomin Wang" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{7,1†}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Weiye Xu" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1†}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Hao Li" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1†}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Jiahao Wang" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1†}" + }, + 
{ + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Nianchen Deng" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Songze Li" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Yinan He" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Tan Jiang" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Jiapeng Luo" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Yi Wang" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Conghui He" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Botian Shi" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Xingcheng Zhang" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Wenqi Shao" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": 
"^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Junjun He" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Yingtong Xiong" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Wenwen Qu" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Peng Sun" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Penglong Jiao" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Han Lv" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Lijun Wu" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Kaipeng Zhang" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Huipeng Deng" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Jiaye Ge" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + 
"content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Kai Chen" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Limin Wang" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{4,1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Min Dou" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Lewei Lu" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Xizhou Zhu" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{3,1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Tong Lu" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Dahua Lin" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{6,1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Yu Qiao" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Jifeng Dai" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "inline_equation", + "content": "^{3,1‡}" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": "text", + "content": ", Wenhai Wang" + }, + { + "bbox": [ + 102, + 178, + 511, + 257 + ], + "type": 
"inline_equation", + "content": "^{6,1‡}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "spans": [ + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "text", + "content": "Shanghai AI Laboratory " + }, + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "text", + "content": "SenseTime Research " + }, + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "text", + "content": "Tsinghua University " + }, + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "text", + "content": "Nanjing University " + }, + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "text", + "content": "Fudan University " + }, + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "text", + "content": "The Chinese University of Hong Kong " + }, + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "inline_equation", + "content": "^{7}" + }, + { + "bbox": [ + 118, + 258, + 487, + 283 + ], + "type": "text", + "content": "Shanghai Jiao Tong University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 192, + 293, + 418, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 293, + 418, + 304 + ], + "spans": [ + { + "bbox": [ + 192, + 293, + 418, + 304 + ], + "type": "text", + "content": "Code: 
https://github.com/OpenGVLab/InternVL" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 166, + 304, + 444, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 304, + 444, + 314 + ], + "spans": [ + { + "bbox": [ + 166, + 304, + 444, + 314 + ], + "type": "text", + "content": "Model: https://huggingface.co/OpenGVLab/InternVL3-78B" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 145, + 316, + 465, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 316, + 465, + 326 + ], + "spans": [ + { + "bbox": [ + 145, + 316, + 465, + 326 + ], + "type": "text", + "content": "Data: https://huggingface.co/datasets/OpenGVLab/InternVL-Data" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 281, + 370, + 329, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 370, + 329, + 383 + ], + "spans": [ + { + "bbox": [ + 281, + 370, + 329, + 383 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 118, + 396, + 492, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 396, + 492, + 594 + ], + "spans": [ + { + "bbox": [ + 118, + 396, + 492, + 594 + ], + "type": "text", + "content": "We introduce InternVL3, a significant advancement in the InternVL series featuring a native multimodal pre-training paradigm. Rather than adapting a text-only large language model (LLM) into a multimodal large language model (MLLM) that supports visual inputs, InternVL3 jointly acquires multimodal and linguistic capabilities from both diverse multimodal data and pure-text corpora during a single pre-training stage. This unified training paradigm effectively addresses the complexities and alignment challenges commonly encountered in conventional post-hoc training pipelines for MLLMs. 
To further improve performance and scalability, InternVL3 incorporates variable visual position encoding (V2PE) to support extended multimodal contexts, employs advanced post-training techniques such as supervised fine-tuning (SFT) and mixed preference optimization (MPO), and adopts test-time scaling strategies alongside an optimized training infrastructure. Extensive empirical evaluations demonstrate that InternVL3 delivers superior performance across a wide range of multi-modal tasks. In particular, InternVL3-78B achieves a score of 72.2 on the MMMU benchmark, setting a new state-of-the-art among open-source MLLMs. Its capabilities remain highly competitive with leading proprietary models, including ChatGPT-4o, Claude 3.5 Sonnet, and Gemini 2.5 Pro, while also maintaining strong pure-language proficiency. In pursuit of open-science principles, we will publicly release both the training data and model weights to foster further research and development in next-generation MLLMs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 613, + 169, + 625 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 613, + 169, + 625 + ], + "spans": [ + { + "bbox": [ + 83, + 613, + 169, + 625 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 82, + 637, + 528, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 637, + 528, + 694 + ], + "spans": [ + { + "bbox": [ + 82, + 637, + 528, + 694 + ], + "type": "text", + "content": "Multimodal large language models (MLLMs) [32, 66, 121, 21, 19, 123, 68, 114, 97, 136, 71, 31, 85, 117, 18, 89, 105, 69] have recently achieved or even surpassed human-level performance in a broad spectrum of tasks, underscoring their potential as a significant stride toward artificial general intelligence (AGI). 
Yet, the majority of leading MLLMs—both open-source and proprietary—are adapted from text-only large language models through sophisticated multi-stage pipelines [21, 19, 18, 5, 121, 7]. These “post-hoc” approaches are built upon the" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "spans": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "type": "text", + "content": "arXiv:2504.10479v3 [cs.CV] 19 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 700, + 383, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 700, + 383, + 722 + ], + "spans": [ + { + "bbox": [ + 83, + 700, + 383, + 722 + ], + "type": "text", + "content": "* equal contribution; † interns at OpenGVLab, Shanghai AI Laboratory; corresponding authors (daijifeng@tsinghua.edu.cn, wangwenhai@pjlab.org.cn)." + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 86, + 72, + 525, + 338 + ], + "blocks": [ + { + "bbox": [ + 86, + 72, + 525, + 338 + ], + "lines": [ + { + "bbox": [ + 86, + 72, + 525, + 338 + ], + "spans": [ + { + "bbox": [ + 86, + 72, + 525, + 338 + ], + "type": "table", + "html": "
InternVL2.5 78BInternVL3 8BInternVL3 78BQwen2.5-VL 72BOther Open-Source MLLMsClaude-3.5 SonnetChatGPT-4o-latestGemini-2.5 Pro
Model WeightsXXX
Training DataXX-XXX
MMMU Multi-discipline70.1%65.6%72.2% (2.1 ↑)70.2%64.5%66.4%72.9%74.7%
MathVista Math72.3%75.2%79.6% (7.3 ↑)74.8%70.5%65.1%71.6%80.9%
AI2D Diagrams89.1%85.2%89.7% (0.6 ↑)88.7%88.1%81.2%86.3%89.5%
ChartQA Charts88.3%86.6%89.7% (1.4 ↑)89.5%88.3%90.8%--
DocVQA Documents95.1%92.7%95.4% (0.3 ↑)96.4%96.5%95.2%--
InfographicVQA infographics84.1%76.8%85.2% (1.1 ↑)87.3%84.7%74.3%--
HallusionBench Hallucination57.4%49.9%59.1% (1.7 ↑)55.2%58.1%55.5%57.0%64.1%
OCRBench OCR854880906 (52↑)885877-894862
LongVideoBench Video63.6%58.8%65.7%(2.1↑)60.7%61.3%---
", + "image_path": "9263bfa8ce115702226a96743252c258021d6cf36e1e18edfe8bbaaebc0a2882.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 344, + 529, + 390 + ], + "lines": [ + { + "bbox": [ + 83, + 344, + 529, + 390 + ], + "spans": [ + { + "bbox": [ + 83, + 344, + 529, + 390 + ], + "type": "text", + "content": "Figure 1: Multimodal performance of the InternVL series and other advanced MLLMs. The InternVL series has consistently exhibited progressive enhancements in multimodal capabilities. The newly released InternVL3 significantly outperforms existing open-source MLLMs. Moreover, even in comparison with state-of-the-art closed-source commercial models, InternVL3 continues to demonstrate highly competitive performance." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 82, + 421, + 527, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 421, + 527, + 477 + ], + "spans": [ + { + "bbox": [ + 82, + 421, + 527, + 477 + ], + "type": "text", + "content": "original text-based pre-training processes, thereby introducing alignment challenges when integrating additional modalities such as vision. In practice, bridging modality gaps often necessitates incorporating auxiliary data from specialized domains (e.g., optical character recognition scenarios) and intricate parameter-freezing or multi-stage fine-tuning schedules to ensure that core linguistic capacities remain uncompromised [73, 7, 5, 18]. Such resource-intensive strategies highlight the need for more efficient multimodal training paradigms." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 82, + 481, + 526, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 481, + 526, + 548 + ], + "spans": [ + { + "bbox": [ + 82, + 481, + 526, + 548 + ], + "type": "text", + "content": "In this report, we introduce InternVL3, the latest milestone in the InternVL series [21, 20, 18], which is distinguished by its native multimodal pre-training strategy. Rather than first pre-training a text-only large language model and subsequently retrofitting it via multimodal alignment to support visual processing, InternVL3 learns multimodal capabilities from the pre-training stage by jointly exposed to both text-only corpora and diverse multimodal datasets. This unified approach enables the model to simultaneously acquire linguistic and multimodal competencies in a more efficient and integrated manner." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 552, + 527, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 552, + 527, + 609 + ], + "spans": [ + { + "bbox": [ + 82, + 552, + 527, + 609 + ], + "type": "text", + "content": "InternVL3 further excels through multiple innovations that reinforce both performance and scalability. We employ a variable visual position encoding (V2PE) mechanism [42] to accommodate longer multimodal contexts. Furthermore, advanced post-training strategies—comprising supervised fine-tuning (SFT) and mixed preference optimization (MPO) [124]—together with test-time scaling strategies [125] and an optimized training infrastructure [15], significantly enhance InternVL3's efficiency and performance." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 82, + 612, + 529, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 612, + 529, + 723 + ], + "spans": [ + { + "bbox": [ + 82, + 612, + 529, + 723 + ], + "type": "text", + "content": "Comprehensive empirical evaluations demonstrate that InternVL3 surpasses its predecessors (e.g., InternVL2.5 [18]) across a wide range of tasks, including multi-discipline reasoning, document understanding, multi-image / video understanding, real-world comprehension, multimodal hallucination detection, visual grounding, and multilingual capabilities. Notably, by incorporating expanded domain-specific datasets, InternVL3 also exhibits marked improvements in tool usage, GUI agents, industrial image analysis, and spatial reasoning, thus substantially extending the multimodal scenarios addressed by the InternVL series. It proves highly competitive with other open-source MLLMs such as Qwen2.5-VL [7] and remains on par with closed-source models (e.g., ChatGPT-4o [98], Claude-3.5 Sonnet [3], Gemini-2.5 Pro [117]). This versatility is evidenced by its 72.2-point performance on the MMMU benchmark [141], setting a new standard among open-source MLLMs. Additionally, InternVL3 demonstrates language capabilities comparable to other advanced LLMs of similar scale." 
+ } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 86, + 71, + 521, + 328 + ], + "blocks": [ + { + "bbox": [ + 86, + 71, + 521, + 328 + ], + "lines": [ + { + "bbox": [ + 86, + 71, + 521, + 328 + ], + "spans": [ + { + "bbox": [ + 86, + 71, + 521, + 328 + ], + "type": "image", + "image_path": "2543074654573cacd7d214bda38adddfa3eca6683b87e7e4c38e54f1a78f3548.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 82, + 334, + 529, + 380 + ], + "lines": [ + { + "bbox": [ + 82, + 334, + 529, + 380 + ], + "spans": [ + { + "bbox": [ + 82, + 334, + 529, + 380 + ], + "type": "text", + "content": "Figure 2: Performance of various MLLMs on the OpenCompass multimodal academic leaderboard. The enhanced InternVL series—InternVL3—demonstrates outstanding multimodal capabilities, significantly outperforming both the Qwen2.5-VL series and closed-source models such as Step-1o, GLM-4v-Plus, and GPT-4o. Remarkably, InternVL3-78B also remains highly competitive with the state-of-the-art Gemini-2.5-Pro." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 399, + 526, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 399, + 526, + 434 + ], + "spans": [ + { + "bbox": [ + 82, + 399, + 526, + 434 + ], + "type": "text", + "content": "To foster further advancements within the open-source community, we will release the training data1 and model weights alongside this work, thereby ensuring transparency and reproducibility for the continued development of next-generation MLLMs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 448, + 161, + 462 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 448, + 161, + 462 + ], + "spans": [ + { + "bbox": [ + 83, + 448, + 161, + 462 + ], + "type": "text", + "content": "2 InternVL3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 472, + 526, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 472, + 526, + 518 + ], + "spans": [ + { + "bbox": [ + 82, + 472, + 526, + 518 + ], + "type": "text", + "content": "Building upon the prior InternVL series [21, 19, 18], we propose InternVL3, a new generation within the InternVL model family. InternVL3 is specifically designed to streamline the training pipeline while significantly enhancing multimodal capabilities. In this section, we first delineate the core components of InternVL3, including its model architecture, training procedures, test-time scaling strategies, and infrastructure-level optimizations." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 83, + 529, + 194, + 542 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 529, + 194, + 542 + ], + "spans": [ + { + "bbox": [ + 83, + 529, + 194, + 542 + ], + "type": "text", + "content": "2.1 Model Architecture" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 82, + 550, + 526, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 550, + 526, + 572 + ], + "spans": [ + { + "bbox": [ + 82, + 550, + 526, + 572 + ], + "type": "text", + "content": "The architecture of InternVL3 follows the same general framework as its predecessors, adhering to the \"ViTMLP-LLM\" paradigm [66, 18, 41, 20]. Detailed architectural specifications are summarized in Table 1." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 82, + 577, + 526, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 577, + 526, + 677 + ], + "spans": [ + { + "bbox": [ + 82, + 577, + 526, + 677 + ], + "type": "text", + "content": "Although the native pre-training paradigm discussed later could enable training MLLMs from scratch, we choose to initialize the ViT and LLM components with pre-trained model weights to reduce computational costs. The vision encoder is available in two configurations: InternViT-300M and InternViT-6B. For the language model, we leverage pre-trained large language models (LLMs), specifically the Qwen2.5 series and InternLM3-8B. Importantly, our LLM components are initialized solely from pre-trained base models, without employing instruction-tuned variants. The multilayer perceptron (MLP) utilized in the model is a two-layer network with random initialization. In line with the approach taken in InternVL2.5, InternVL3 incorporates a pixel unshuffle operation to enhance scalability for processing high-resolution images. 
This operation reduces the visual token count to one-quarter of its original value, representing each " + }, + { + "bbox": [ + 82, + 577, + 526, + 677 + ], + "type": "inline_equation", + "content": "448 \\times 448" + }, + { + "bbox": [ + 82, + 577, + 526, + 677 + ], + "type": "text", + "content": " image tile with 256 visual tokens." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 82, + 681, + 526, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 681, + 526, + 704 + ], + "spans": [ + { + "bbox": [ + 82, + 681, + 526, + 704 + ], + "type": "text", + "content": "Variable Visual Position Encoding. InternVL3 also integrates the Variable Visual Position Encoding (V2PE) [42], which utilizes smaller, more flexible position increments for visual tokens. This modifica" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 711, + 525, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 711, + 525, + 723 + ], + "spans": [ + { + "bbox": [ + 96, + 711, + 525, + 723 + ], + "type": "text", + "content": "1The open-source data are being organized, and a comprehensive list will be included in a future revision of this report." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 115, + 70, + 492, + 158 + ], + "blocks": [ + { + "bbox": [ + 115, + 70, + 492, + 158 + ], + "lines": [ + { + "bbox": [ + 115, + 70, + 492, + 158 + ], + "spans": [ + { + "bbox": [ + 115, + 70, + 492, + 158 + ], + "type": "table", + "html": "
Model Name#ParamVision EncoderLanguage ModelOpenCompass Academic
InternVL3-1B0.9BInternViT-300M-448px-V2.5Qwen2.5-0.5B57.4
InternVL3-2B1.9BInternViT-300M-448px-V2.5Qwen2.5-1.5B63.9
InternVL3-8B8.1BInternViT-300M-448px-V2.5Qwen2.5-7B73.3
InternVL3-9B9.2BInternViT-300M-448px-V2.5InternLM3-8B72.4
InternVL3-14B15.1BInternViT-300M-448px-V2.5Qwen2.5-14B75.5
InternVL3-38B38.4BInternViT-6B-448px-V2.5Qwen2.5-32B77.3
InternVL3-78B78.4BInternViT-6B-448px-V2.5Qwen2.5-72B79.5
", + "image_path": "6bc83e896e053550faf383149197753b306b3c76e1820e010125d7ed48a15de9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 165, + 527, + 189 + ], + "lines": [ + { + "bbox": [ + 83, + 165, + 527, + 189 + ], + "spans": [ + { + "bbox": [ + 83, + 165, + 527, + 189 + ], + "type": "text", + "content": "Table 1: Pre-trained models used in the InternVL3 series. The OpenCompass scores for the InternVL3 series were obtained through our local testing." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 82, + 216, + 527, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 216, + 527, + 240 + ], + "spans": [ + { + "bbox": [ + 82, + 216, + 527, + 240 + ], + "type": "text", + "content": "tion facilitates the handling of longer multimodal contexts without excessively extending the position window. Specifically, each training sample for the MLLM is represented as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 255, + 244, + 526, + 258 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 244, + 526, + 258 + ], + "spans": [ + { + "bbox": [ + 255, + 244, + 526, + 258 + ], + "type": "interline_equation", + "content": "\\mathbf {x} = \\left(x _ {1}, x _ {2}, \\dots , x _ {L}\\right), \\tag {1}", + "image_path": "99fb5780bbc77cb5290472a8cf40013dc29034a545624543f2d14aef20cdbdeb.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 262, + 527, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 262, + 527, + 296 + ], + "spans": [ + { + "bbox": [ + 82, + 262, + 527, + 296 + ], + "type": "text", + "content": "where each token " + }, + { + "bbox": [ + 82, + 262, + 527, + 296 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 82, + 262, + 527, + 296 + ], + "type": "text", + "content": " can be a textual token embedding, a visual embedding, or 
another modality-specific representation (e.g., video patch embeddings). The position index " + }, + { + "bbox": [ + 82, + 262, + 527, + 296 + ], + "type": "inline_equation", + "content": "p_{i}" + }, + { + "bbox": [ + 82, + 262, + 527, + 296 + ], + "type": "text", + "content": " for any token " + }, + { + "bbox": [ + 82, + 262, + 527, + 296 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 82, + 262, + 527, + 296 + ], + "type": "text", + "content": " can be computed sequentially as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 214, + 297, + 526, + 323 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 297, + 526, + 323 + ], + "spans": [ + { + "bbox": [ + 214, + 297, + 526, + 323 + ], + "type": "interline_equation", + "content": "p _ {i} = \\left\\{ \\begin{array}{l l} 0, & \\text {i f} i = 1, \\\\ f _ {\\text {p o s}} \\left(p _ {i - 1}, x _ {i}\\right), & \\text {f o r} i = 2, 3, \\dots , N. \\end{array} \\right. \\tag {2}", + "image_path": "69dc7e20d347fac7561af47567c628a75bc55d51a0710f5aaf010d6f03495c58.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 82, + 326, + 527, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 326, + 527, + 360 + ], + "spans": [ + { + "bbox": [ + 82, + 326, + 527, + 360 + ], + "type": "text", + "content": "In contrast to traditional MLLMs, where position indices increment uniformly by 1 for each token, irrespective of modality, V2PE employs a modality-specific recursive function for position index computation. 
This results in distinct position index assignments for textual and visual tokens:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 217, + 366, + 526, + 393 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 366, + 526, + 393 + ], + "spans": [ + { + "bbox": [ + 217, + 366, + 526, + 393 + ], + "type": "interline_equation", + "content": "p _ {i} = p _ {i - 1} + \\left\\{ \\begin{array}{l l} 1, & \\text {i f} x _ {i} \\text {i s a t e x t u a l t o k e n ,} \\\\ \\delta , & \\text {i f} x _ {i} \\text {i s a v i s u a l t o k e n ,} \\end{array} \\right. \\tag {3}", + "image_path": "67b065acbd523b3220d6a82f9de75ef6a8a324eb41a9f7e7145e4a1e352b0451.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 82, + 399, + 527, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 399, + 527, + 453 + ], + "spans": [ + { + "bbox": [ + 82, + 399, + 527, + 453 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 82, + 399, + 527, + 453 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 82, + 399, + 527, + 453 + ], + "type": "text", + "content": " is a smaller increment (" + }, + { + "bbox": [ + 82, + 399, + 527, + 453 + ], + "type": "inline_equation", + "content": "\\delta < 1" + }, + { + "bbox": [ + 82, + 399, + 527, + 453 + ], + "type": "text", + "content": "), reducing the rate at which position indices increase for visual tokens. The standard increment of 1 is retained for textual tokens to preserve their positional distinctions. In line with the original V2PE design, we maintain that " + }, + { + "bbox": [ + 82, + 399, + 527, + 453 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 82, + 399, + 527, + 453 + ], + "type": "text", + "content": " remains constant within a single image to preserve the relative positional relationships. 
During training, " + }, + { + "bbox": [ + 82, + 399, + 527, + 453 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 82, + 399, + 527, + 453 + ], + "type": "text", + "content": " is randomly chosen for each image from a predefined set of fractional values:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 212, + 452, + 526, + 475 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 452, + 526, + 475 + ], + "spans": [ + { + "bbox": [ + 212, + 452, + 526, + 475 + ], + "type": "interline_equation", + "content": "\\delta \\in \\Delta = \\left\\{1, \\frac {1}{2}, \\frac {1}{4}, \\frac {1}{8}, \\frac {1}{1 6}, \\frac {1}{3 2}, \\frac {1}{6 4}, \\frac {1}{1 2 8}, \\frac {1}{2 5 6} \\right\\}. \\tag {4}", + "image_path": "2bff5625c2428d27d5a5ebbba8c54ea1547acd55e9e07eca1d80f9348b3b9394.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 82, + 476, + 527, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 476, + 527, + 510 + ], + "spans": [ + { + "bbox": [ + 82, + 476, + 527, + 510 + ], + "type": "text", + "content": "During inference, " + }, + { + "bbox": [ + 82, + 476, + 527, + 510 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 82, + 476, + 527, + 510 + ], + "type": "text", + "content": " can be flexibly selected based on the input sequence length, enabling a balance between task performance and ensuring that position indices remain within the model's valid context range. Notably, when " + }, + { + "bbox": [ + 82, + 476, + 527, + 510 + ], + "type": "inline_equation", + "content": "\\delta = 1" + }, + { + "bbox": [ + 82, + 476, + 527, + 510 + ], + "type": "text", + "content": ", V2PE reverts to the conventional positional encoding used in InternVL2.5." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 83, + 522, + 248, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 522, + 248, + 534 + ], + "spans": [ + { + "bbox": [ + 83, + 522, + 248, + 534 + ], + "type": "text", + "content": "2.2 Native Multimodal Pre-Training" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 82, + 542, + 527, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 542, + 527, + 643 + ], + "spans": [ + { + "bbox": [ + 82, + 542, + 527, + 643 + ], + "type": "text", + "content": "We propose a native multimodal pre-training approach that consolidates language pre-training and multi-modal alignment training into a single pre-training stage. Unlike conventional paradigms—where a language-only large model is first trained (typically with language pre-training followed by language post-training) and subsequently adapted to accommodate additional modalities—our method performs integrated optimization by interleaving multimodal data (e.g., image-text, video-text, or interleaved image-text sequences) with large-scale textual corpora during the pre-training process. This unified training scheme enables the pre-trained model to learn both linguistic and multimodal capabilities simultaneously, ultimately enhancing its capability to handle vision-language tasks without introducing additional bridging modules or subsequent inter-model alignment procedures." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 83, + 651, + 527, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 651, + 527, + 686 + ], + "spans": [ + { + "bbox": [ + 83, + 651, + 527, + 686 + ], + "type": "text", + "content": "Multimodal Autoregressive Formulation. 
Let " + }, + { + "bbox": [ + 83, + 651, + 527, + 686 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 83, + 651, + 527, + 686 + ], + "type": "text", + "content": " denote a Transformer-based model parameterized by " + }, + { + "bbox": [ + 83, + 651, + 527, + 686 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 83, + 651, + 527, + 686 + ], + "type": "text", + "content": " that can process text, image, and video simultaneously. Specifically, for an arbitrary training sample " + }, + { + "bbox": [ + 83, + 651, + 527, + 686 + ], + "type": "inline_equation", + "content": "\\mathbf{x} = (x_{1}, x_{2}, \\ldots, x_{L})" + }, + { + "bbox": [ + 83, + 651, + 527, + 686 + ], + "type": "text", + "content": " with the token length of " + }, + { + "bbox": [ + 83, + 651, + 527, + 686 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 83, + 651, + 527, + 686 + ], + "type": "text", + "content": ", we adopt the standard left-to-right autoregressive objective:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 201, + 693, + 526, + 724 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 693, + 526, + 724 + ], + "spans": [ + { + "bbox": [ + 201, + 693, + 526, + 724 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {f u l l}} (\\theta) = - \\sum_ {i = 2} ^ {L} w _ {i} \\cdot \\log p _ {\\theta} \\left(x _ {i} \\mid x _ {1}, \\dots , x _ {i - 1}\\right), \\tag {5}", + "image_path": "4b335ff589f54e65900f04f73a3dd478322dcb3034ef6ea2f9bcd3140fa3f198.jpg" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + 
"page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 82, + 72, + 527, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 72, + 527, + 97 + ], + "spans": [ + { + "bbox": [ + 82, + 72, + 527, + 97 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 82, + 72, + 527, + 97 + ], + "type": "inline_equation", + "content": "w_{i}" + }, + { + "bbox": [ + 82, + 72, + 527, + 97 + ], + "type": "text", + "content": " denotes the loss weight of token " + }, + { + "bbox": [ + 82, + 72, + 527, + 97 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 82, + 72, + 527, + 97 + ], + "type": "text", + "content": ". Although this formulation naturally propagates gradients through tokens of all modalities, we restrict the loss computation exclusively to text tokens, resulting in:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 184, + 101, + 527, + 141 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 101, + 527, + 141 + ], + "spans": [ + { + "bbox": [ + 184, + 101, + 527, + 141 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {t e x t - o n l y}} (\\theta) = - \\sum_ {\\substack {i = 2 \\\\ x _ {i} \\in \\text {T e x t}}} ^ {L} w _ {i} \\cdot \\log p _ {\\theta} \\left(x _ {i} \\mid x _ {1}, \\dots , x _ {i - 1}\\right). \\tag{6}", + "image_path": "26106948eeedbc5699b023a9f15b87fe04a4a87aaecf79a11bf90a130bc8f305.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 82, + 146, + 529, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 146, + 529, + 213 + ], + "spans": [ + { + "bbox": [ + 82, + 146, + 529, + 213 + ], + "type": "text", + "content": "Under this selective objective, visual tokens serve as conditioning context for text prediction and are not directly predicted. Consequently, the model learns to embed multimodal information in a manner that is beneficial for downstream language decoding tasks. 
Notably, regarding the design choice of the token weight " + }, + { + "bbox": [ + 82, + 146, + 529, + 213 + ], + "type": "inline_equation", + "content": "w_{i}" + }, + { + "bbox": [ + 82, + 146, + 529, + 213 + ], + "type": "text", + "content": ", as discussed in InternVL2.5 [18], the widely used token averaging and sample averaging strategies can lead to gradients biased toward longer and shorter responses, respectively. To mitigate this issue, we adopt square averaging, which is defined as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 228, + 225, + 527, + 268 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 225, + 527, + 268 + ], + "spans": [ + { + "bbox": [ + 228, + 225, + 527, + 268 + ], + "type": "interline_equation", + "content": "w _ {i} = \\left\\{ \\begin{array}{l l} \\frac {1}{l ^ {0}}, & \\text {f o r t o k e n a v e r a g i n g} \\\\ \\frac {1}{l ^ {0 . 5}}, & \\text {f o r s q u a r e a v e r a g i n g} \\\\ \\frac {1}{l ^ {1}}, & \\text {f o r s a m p l e a v e r a g i n g}, \\end{array} \\right. \\tag {7}", + "image_path": "e9f1b1a5e5595c43873b6029f623d54746bdb26df212ceb45762e15ae5a818cd.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 83, + 275, + 492, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 275, + 492, + 288 + ], + "spans": [ + { + "bbox": [ + 83, + 275, + 492, + 288 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 83, + 275, + 492, + 288 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 83, + 275, + 492, + 288 + ], + "type": "text", + "content": " denotes the number of tokens in the training sample on which the loss needs to be calculated." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 82, + 296, + 527, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 296, + 527, + 327 + ], + "spans": [ + { + "bbox": [ + 82, + 296, + 527, + 327 + ], + "type": "text", + "content": "Joint Parameter Optimization. Unlike the conventional \"language-only training followed by multimodal adaptation\" paradigm, our method updates all model parameters jointly during multimodal pre-training. Specifically, let" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 224, + 328, + 526, + 347 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 224, + 328, + 526, + 347 + ], + "spans": [ + { + "bbox": [ + 224, + 328, + 526, + 347 + ], + "type": "interline_equation", + "content": "\\theta^ {*} = \\underset {\\theta} {\\arg \\min } \\mathbb {E} _ {\\mathbf {x} \\in \\mathcal {D} _ {\\text {m u l t i}}} \\left[ \\mathcal {L} _ {\\text {t e x t - o n l y}} (\\theta) \\right], \\tag {8}", + "image_path": "75d2c78becd00bea4f6c06cca69425a7971e2b6d3a0e905b5929f558bbfb943b.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 82, + 350, + 527, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 350, + 527, + 394 + ], + "spans": [ + { + "bbox": [ + 82, + 350, + 527, + 394 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 82, + 350, + 527, + 394 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{multi}}" + }, + { + "bbox": [ + 82, + 350, + 527, + 394 + ], + "type": "text", + "content": " is the union of large-scale text-only and multimodal corpora (e.g., image-text or video-text pairs). We thus optimize a single model to handle these combined data sources. This multi-task joint optimization ensures that text representations and visual features are learned in concert, reinforcing alignment across modalities." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 82, + 399, + 527, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 399, + 527, + 467 + ], + "spans": [ + { + "bbox": [ + 82, + 399, + 527, + 467 + ], + "type": "text", + "content": "Moreover, this integrated optimization departs from conventional \"language-only training followed by multimodal adaptation\" pipelines, which often freeze or partially fine-tune certain layers in the LLM component or even in the ViT encoder when adapting to MLLM. In contrast, our method trains every layer jointly, allowing all parameters to be jointly optimized on large-scale multimodal corpora and ensuring that both linguistic and visual features evolve synchronously. As a result, the final parameters are primed for high performance on both pure language and multimodal tasks, without additional tuning steps." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 82, + 470, + 527, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 470, + 527, + 582 + ], + "spans": [ + { + "bbox": [ + 82, + 470, + 527, + 582 + ], + "type": "text", + "content": "Data. The pre-training data utilized in InternVL3 is broadly classified into two categories: multimodal data and pure language data. The multimodal dataset comprises a synthesis of pre-existing datasets alongside newly acquired real-world data. Specifically, we leverage the pre-training corpus from InternVL2.5, which covers a diverse range of domains such as image captioning, general question answering, mathematics, charts, optical character recognition (OCR), knowledge grounding, document understanding, multi-turn dialogue, and medical data. Although the overall data scale was not increased, the utility of this dataset was significantly improved by updating not only to the MLP module weights but also to those associated with the ViT and LLM components. 
In addition, to enhance the model's ability to generalize in real-world applications, additional data is incorporated from tasks related to graphical user interfaces (GUI), tool usage, 3D scene understanding, and video comprehension." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 82, + 585, + 527, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 585, + 527, + 653 + ], + "spans": [ + { + "bbox": [ + 82, + 585, + 527, + 653 + ], + "type": "text", + "content": "To compensate for the relatively short and less diverse textual content typically found in multimodal datasets, we integrate pure language data into the pre-training process. This helps preserve and amplify the model's capabilities in language understanding and generation. The language corpus is primarily constructed on the pre-training data from InternLM2.5 and is further augmented with various open-source text datasets [8, 77, 79]. This enhancement aims to improve the model's performance on knowledge-intensive tasks, as well as its proficiency in mathematical and reasoning tasks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 82, + 655, + 527, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 655, + 527, + 723 + ], + "spans": [ + { + "bbox": [ + 82, + 655, + 527, + 723 + ], + "type": "text", + "content": "Given the complexity of balancing these heterogeneous data sources, determining an appropriate sampling strategy is non-trivial. In InternVL3, we adopt a two-stage strategy to establish the optimal sampling ratio between multimodal and language data. Initially, we train separate models on the multimodal and language datasets and evaluate their performance on corresponding benchmarks, allowing us to identify optimal sampling ratios within each modality. Then, under a fixed total training budget, we combine the two modalities and determine their relative sampling ratio. 
Empirical studies show that a 1:3 ratio of language to multimodal data" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 82, + 72, + 529, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 72, + 529, + 106 + ], + "spans": [ + { + "bbox": [ + 82, + 72, + 529, + 106 + ], + "type": "text", + "content": "yields the best overall performance across both unimodal and multimodal benchmarks. Under this configuration, the total number of training tokens is approximately 200 billion, comprising 50 billion from language data and 150 billion from multimodal data." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 119, + 168, + 132 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 119, + 168, + 132 + ], + "spans": [ + { + "bbox": [ + 83, + 119, + 168, + 132 + ], + "type": "text", + "content": "2.3 Post-Training" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 82, + 140, + 529, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 140, + 529, + 196 + ], + "spans": [ + { + "bbox": [ + 82, + 140, + 529, + 196 + ], + "type": "text", + "content": "After the Native Multimodal Pre-Training, we apply a two-stage post-training strategy to further enhance the multimodal conversation and reasoning abilities of our models. This strategy consists of Supervised Fine-Tuning (SFT) and Mixed Preference Optimization (MPO). In the SFT phase, the model is trained to imitate the high-quality responses under positive supervision signals. 
In the subsequent MPO phase, we introduce additional supervision from both positive and negative samples, thereby further improving its overall abilities." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 82, + 199, + 529, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 199, + 529, + 266 + ], + "spans": [ + { + "bbox": [ + 82, + 199, + 529, + 266 + ], + "type": "text", + "content": "Supervised Fine-Tuning. In this phase, the techniques of random JPEG compression, square loss re-weighting, and multimodal data packing proposed in InternVL2.5 [18] are also employed in the InternVL3 series. The main advancement of the SFT phase in InternVL3 compared to InternVL2.5 lies in the use of higher-quality and more diverse training data. Specifically, we further extend training samples for tool usage, 3D scene understanding, GUI operations, long context tasks, video understanding, scientific diagrams, creative writing, and multimodal reasoning." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 270, + 529, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 270, + 529, + 361 + ], + "spans": [ + { + "bbox": [ + 82, + 270, + 529, + 361 + ], + "type": "text", + "content": "Mixed Preference Optimization. During Pre-training and SFT, the model is trained to predict the next token conditioned on previous ground-truth tokens. However, during inference, the model predicts each token based on its own prior outputs. This discrepancy between ground-truth tokens and model-predicted tokens introduces a distribution shift, which can impair the model's Chain-of-Thought (CoT) reasoning capabilities. To mitigate this issue, we employ Mixed Preference Optimization (MPO) [124], which introduces additional supervision from both positive and negative samples to align the model response distribution with the ground-truth distribution, thereby improving reasoning performance. 
Specifically, the training objective of MPO is a combination of preference loss " + }, + { + "bbox": [ + 82, + 270, + 529, + 361 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_p" + }, + { + "bbox": [ + 82, + 270, + 529, + 361 + ], + "type": "text", + "content": ", quality loss " + }, + { + "bbox": [ + 82, + 270, + 529, + 361 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_q" + }, + { + "bbox": [ + 82, + 270, + 529, + 361 + ], + "type": "text", + "content": ", and generation loss " + }, + { + "bbox": [ + 82, + 270, + 529, + 361 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_g" + }, + { + "bbox": [ + 82, + 270, + 529, + 361 + ], + "type": "text", + "content": ", which can be formulated as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 244, + 365, + 528, + 379 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 365, + 528, + 379 + ], + "spans": [ + { + "bbox": [ + 244, + 365, + 528, + 379 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = w _ {p} \\mathcal {L} _ {p} + w _ {q} \\mathcal {L} _ {q} + w _ {g} \\mathcal {L} _ {g}, \\tag {9}", + "image_path": "e2e35c7fd7d71c77b1871156fb34e9db2011cc2fa2a0bf40b9c3f1cf96176906.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 82, + 384, + 528, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 384, + 528, + 407 + ], + "spans": [ + { + "bbox": [ + 82, + 384, + 528, + 407 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 82, + 384, + 528, + 407 + ], + "type": "inline_equation", + "content": "w_{*}" + }, + { + "bbox": [ + 82, + 384, + 528, + 407 + ], + "type": "text", + "content": " represents the weight assigned to each loss component. 
Specifically, the DPO loss [101] serves as the preference loss to enable the model to learn the relative preference between chosen and rejected responses:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 199, + 411, + 528, + 436 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 199, + 411, + 528, + 436 + ], + "spans": [ + { + "bbox": [ + 199, + 411, + 528, + 436 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {p} = - \\log \\sigma \\left(\\beta \\log \\frac {\\pi_ {\\theta} \\left(y _ {c} \\mid x\\right)}{\\pi_ {0} \\left(y _ {c} \\mid x\\right)} - \\beta \\log \\frac {\\pi_ {\\theta} \\left(y _ {r} \\mid x\\right)}{\\pi_ {0} \\left(y _ {r} \\mid x\\right)}\\right), \\tag {10}", + "image_path": "6dc2a72e211edaa712e7c3f6af8295472eb9751fb7d841b2f4c7f75b232c901e.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "spans": [ + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "type": "text", + "content": " is the KL penalty coefficient, and " + }, + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "type": "inline_equation", + "content": "y_{c}" + }, + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "type": "inline_equation", + "content": "y_{r}" + }, + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "type": "text", + "content": " are user query, chosen response, and rejected response, respectively. 
The policy model " + }, + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "type": "text", + "content": " is initialized from model " + }, + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "type": "inline_equation", + "content": "\\pi_0" + }, + { + "bbox": [ + 82, + 440, + 528, + 475 + ], + "type": "text", + "content": ". After that, the BCO loss [53] is employed as the quality loss, which helps the model to understand the absolute quality of individual responses:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 269, + 479, + 528, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 479, + 528, + 495 + ], + "spans": [ + { + "bbox": [ + 269, + 479, + 528, + 495 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {q} = \\mathcal {L} _ {q} ^ {+} + \\mathcal {L} _ {q} ^ {-}, \\tag {11}", + "image_path": "fc1ade31da6480ea486fb3150830af1b4f921e178c54bb077cec9b471f43e941.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 82, + 500, + 528, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 500, + 528, + 533 + ], + "spans": [ + { + "bbox": [ + 82, + 500, + 528, + 533 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 82, + 500, + 528, + 533 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_q^+" + }, + { + "bbox": [ + 82, + 500, + 528, + 533 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 82, + 500, + 528, + 533 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_q^-" + }, + { + "bbox": [ + 82, + 500, + 528, + 533 + ], + "type": "text", + "content": " represent the loss for chosen and rejected responses, respectively. They are calculated independently, requiring the model to differentiate the absolute quality of individual responses. 
The loss terms are given by:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 228, + 533, + 528, + 558 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 533, + 528, + 558 + ], + "spans": [ + { + "bbox": [ + 228, + 533, + 528, + 558 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {q} ^ {+} = - \\log \\sigma \\left(\\beta \\log \\frac {\\pi_ {\\theta} \\left(y _ {c} \\mid x\\right)}{\\pi_ {0} \\left(y _ {c} \\mid x\\right)} - \\delta\\right), \\tag {12}", + "image_path": "6d60b392f37102e7852baf467313d21be5a78ccf54c1cab110baa1eacd2d5320.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 216, + 559, + 528, + 583 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 559, + 528, + 583 + ], + "spans": [ + { + "bbox": [ + 216, + 559, + 528, + 583 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {q} ^ {-} = - \\log \\sigma \\left(- \\left(\\beta \\log \\frac {\\pi_ {\\theta} \\left(y _ {r} \\mid x\\right)}{\\pi_ {0} \\left(y _ {r} \\mid x\\right)} - \\delta\\right)\\right), \\tag {13}", + "image_path": "72726958353c8d32ccc1a02d8065ac4177a8f911d8dbe5434805ed363de54cb0.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 82, + 585, + 528, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 585, + 528, + 619 + ], + "spans": [ + { + "bbox": [ + 82, + 585, + 528, + 619 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 82, + 585, + 528, + 619 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 82, + 585, + 528, + 619 + ], + "type": "text", + "content": " represents the reward shift, calculated as the moving average of previous rewards to stabilize training. Finally, the LM loss is used as the generation loss to help the model learn the generation process of preferred responses. The loss function is defined in Equation 6." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 82, + 623, + 529, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 623, + 529, + 723 + ], + "spans": [ + { + "bbox": [ + 82, + 623, + 529, + 723 + ], + "type": "text", + "content": "Data. For SFT data, we construct the training corpora based on those used in InternVL2.5 [18] while introducing additional tool usage, 3D scene understanding, GUI operations, scientific diagrams, creative writing, and multimodal reasoning samples. As a result, the number of training samples grows from 16.3M in InternVL2.5 to 21.7M in InternVL3. For MPO data, we construct preference pairs based on the data pipeline and samples proposed in MMPR v1.2 [124], which cover a wide range of domains, including general visual question answering (VQA) [43, 50, 90, 83, 127, 126], science [57, 16, 82], chart [91, 54, 11], mathematics [72, 104, 10, 81, 55, 40, 147, 106], OCR [92, 107, 9, 49, 96], and document [24]. We use the SFT versions of InternVL3-8B, 38B, and 78B to generate rollouts. During the MPO phase, all models are trained on the same dataset, which comprises about 300K samples." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 83, + 72, + 186, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 72, + 186, + 85 + ], + "spans": [ + { + "bbox": [ + 83, + 72, + 186, + 85 + ], + "type": "text", + "content": "2.4 Test-Time Scaling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 92, + 527, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 92, + 527, + 127 + ], + "spans": [ + { + "bbox": [ + 82, + 92, + 527, + 127 + ], + "type": "text", + "content": "Test-Time Scaling has been shown to be an effective method to enhance the reasoning abilities of LLMs and MLLMs [108, 94, 87, 70, 120, 36, 152, 125]. In this work, we use the Best-of-N evaluation strategy and employ VisualPRM-8B [125] as the critic model to select the best response for reasoning and mathematics evaluation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 82, + 131, + 527, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 131, + 527, + 198 + ], + "spans": [ + { + "bbox": [ + 82, + 131, + 527, + 198 + ], + "type": "text", + "content": "Visual Process Reward Model. VisualPRM first assigns a quality score to each step of the given solution and then averages these scores to obtain the overall score for this solution. This process is formulated as a multi-turn chat task so that we can effectively leverage the generation ability of MLLMs. 
The image " + }, + { + "bbox": [ + 82, + 131, + 527, + 198 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 82, + 131, + 527, + 198 + ], + "type": "text", + "content": ", question " + }, + { + "bbox": [ + 82, + 131, + 527, + 198 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 82, + 131, + 527, + 198 + ], + "type": "text", + "content": ", and the first step " + }, + { + "bbox": [ + 82, + 131, + 527, + 198 + ], + "type": "inline_equation", + "content": "s_0" + }, + { + "bbox": [ + 82, + 131, + 527, + 198 + ], + "type": "text", + "content": " of the step-by-step solution " + }, + { + "bbox": [ + 82, + 131, + 527, + 198 + ], + "type": "inline_equation", + "content": "s = \\{s_0, s_1, \\dots, s_n\\} \\in S" + }, + { + "bbox": [ + 82, + 131, + 527, + 198 + ], + "type": "text", + "content": " to this question are included in the first turn and a new step is presented in each subsequent turn. During the training stage, the model is required to predict the correctness of the given step in each turn as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 257, + 203, + 526, + 217 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 203, + 526, + 217 + ], + "spans": [ + { + "bbox": [ + 257, + 203, + 526, + 217 + ], + "type": "interline_equation", + "content": "c _ {i} \\sim M \\left(y _ {i} \\mid I, q, s _ {\\leq i}\\right), \\tag {14}", + "image_path": "7c717a81f7447af860e1053745eb32f2b52b80bae0d116059fd98f31dcf80959.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 222, + 526, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 222, + 526, + 246 + ], + "spans": [ + { + "bbox": [ + 82, + 222, + 526, + 246 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 82, + 222, + 526, + 246 + ], + "type": "inline_equation", + "content": "c_{i} \\in \\{+, -\\}" + }, + { + "bbox": [ + 82, + 222, + 526, + 246 + ], + "type": "text", + 
"content": " denotes the correctness of " + }, + { + "bbox": [ + 82, + 222, + 526, + 246 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 82, + 222, + 526, + 246 + ], + "type": "text", + "content": "-th step. During the inference stage, the score for each step is defined as the probability of generating \"+\"" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 82, + 250, + 527, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 250, + 527, + 284 + ], + "spans": [ + { + "bbox": [ + 82, + 250, + 527, + 284 + ], + "type": "text", + "content": "Data. VisualPRM400K [125] is used to train VisualPRM, which is constructed based on multimodal questions collected from MMPR v1.2 [124]. Following the data pipeline in VisualPRM400K, we further expand VisualPRM400K by sampling rollouts from the 8B and 38B variants of InternVL3." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 83, + 297, + 171, + 308 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 297, + 171, + 308 + ], + "spans": [ + { + "bbox": [ + 83, + 297, + 171, + 308 + ], + "type": "text", + "content": "2.5 Infrastructure" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 82, + 317, + 527, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 317, + 527, + 396 + ], + "spans": [ + { + "bbox": [ + 82, + 317, + 527, + 396 + ], + "type": "text", + "content": "To facilitate model training, we extend the InternEVO framework [15]—originally designed to optimize the Zero Redundancy Optimizer (ZeRO) for large-scale LLM training—to support the training of our InternVL models. This extension enables efficient scaling to hundreds of billions of parameters across thousands of GPUs. The enhanced framework introduces flexible and decoupled sharding strategies for the ViT, MLP, and LLM components, significantly improving training efficiency by overlapping communication and computation. 
It further supports a comprehensive range of parallelism strategies—including data, tensor, sequence, and pipeline parallelism—as well as their arbitrary combinations." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 82, + 399, + 527, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 399, + 527, + 443 + ], + "spans": [ + { + "bbox": [ + 82, + 399, + 527, + 443 + ], + "type": "text", + "content": "A key challenge in MLLM training is the imbalance in computational load caused by the varying proportions of visual and textual tokens. Such imbalances can lead to inefficiencies by overburdening either the ViT or LLM modules. To address this, we introduce a suite of techniques that dynamically balance computational workloads across modules, ensuring efficient and equitable resource utilization." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 82, + 449, + 527, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 449, + 527, + 525 + ], + "spans": [ + { + "bbox": [ + 82, + 449, + 527, + 525 + ], + "type": "text", + "content": "For InternVL models of varying scales, the extended InternEVO framework formulates an optimization objective that identifies the optimal configuration to minimize both memory consumption and communication overhead across different module dimensions. To support sequences of up to 32K tokens, our approach incorporates both head-parallel and sequence-parallel techniques, effectively overcoming scalability bottlenecks while preserving computational efficiency. 
Compared to the training of InternVL2.5, the application of InternEVO in InternVL3 results in a training speedup of " + }, + { + "bbox": [ + 82, + 449, + 527, + 525 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 82, + 449, + 527, + 525 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 82, + 449, + 527, + 525 + ], + "type": "inline_equation", + "content": "200\\%" + }, + { + "bbox": [ + 82, + 449, + 527, + 525 + ], + "type": "text", + "content": " for models of comparable size, given the same computational budget." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 83, + 542, + 170, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 542, + 170, + 555 + ], + "spans": [ + { + "bbox": [ + 83, + 542, + 170, + 555 + ], + "type": "text", + "content": "3 Experiments" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 82, + 567, + 527, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 567, + 527, + 667 + ], + "spans": [ + { + "bbox": [ + 82, + 567, + 527, + 667 + ], + "type": "text", + "content": "In this section, we first compare the overall multimodal capabilities of InternVL3 with those of current advanced MLLMs using widely adopted multimodal benchmarks. Subsequently, we evaluate the performance of InternVL3 in various domains, including multimodal reasoning, mathematics, optical character recognition (OCR), chart and document understanding, multi-image understanding, real-world comprehension, comprehensive multimodal evaluation, multimodal hallucination evaluation, visual grounding, multimodal multilingual understanding, video understanding, and other multimodal tasks, most of which were tested using VLMEvalKit [33]. Additionally, we provide a detailed evaluation of the language capabilities of InternVL3. 
Finally, we analyze the advantages of several key modifications in InternVL3 compared to its predecessor, InternVL2.5, including the naive multimodal pre-training, the V2PE positional encoding, and the improvements brought by the post-training technique." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 83, + 679, + 320, + 691 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 679, + 320, + 691 + ], + "spans": [ + { + "bbox": [ + 83, + 679, + 320, + 691 + ], + "type": "text", + "content": "3.1 Overall Comparison to Other Advanced MLLMs" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 82, + 700, + 527, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 700, + 527, + 723 + ], + "spans": [ + { + "bbox": [ + 82, + 700, + 527, + 723 + ], + "type": "text", + "content": "Figure 1 provides a detailed assessment of InternVL3's performance across a diverse set of benchmarks, including MMMU [141], MathVista [80], AI2D [57], ChartQA [91], DocVQA [93], InfographicVQA [92]," + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 82, + 72, + 527, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 72, + 527, + 118 + ], + "spans": [ + { + "bbox": [ + 82, + 72, + 527, + 118 + ], + "type": "text", + "content": "HallusionBench [45], OCRBench [76], and LongVideoBench [129]. Compared with previous models, InternVL3 demonstrates substantial improvements across a wide range of task categories. 
These advancements can be primarily attributed to enhanced training strategies, refined testing methodologies, and the expanded training corpus." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 121, + 527, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 121, + 527, + 167 + ], + "spans": [ + { + "bbox": [ + 82, + 121, + 527, + 167 + ], + "type": "text", + "content": "More specifically, InternVL3 achieves an impressive score of 72.2 on the MMMU benchmark, underscoring its superior capacity to manage complex multimodal challenges. Beyond its performance on MMMU, InternVL3 consistently outperforms earlier versions of the InternVL series on a variety of tasks, thereby emphasizing its broad applicability to real-world scenarios that require sophisticated multimodal comprehension and reasoning." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 82, + 171, + 527, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 171, + 527, + 247 + ], + "spans": [ + { + "bbox": [ + 82, + 171, + 527, + 247 + ], + "type": "text", + "content": "In addition to surpassing its open-source counterparts, InternVL3 exhibits competitive performance relative to leading closed-source commercial models, such as ChatGPT-4o-latest [98] and Claude-3.5 Sonnet [3]. In many cases, the performance gap between InternVL3 and these proprietary models is notably narrowed—and in certain benchmarks, such as AI2D and ChartQA, InternVL3 even surpasses them. Nonetheless, our results further reveal that Gemini2.5 Pro [117] maintains a performance edge on select tasks (e.g., on HallusionBench), indicating that despite the notable progress in InternVL3, there remains room for further refinement of our InternVL series." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 262, + 283, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 262, + 283, + 274 + ], + "spans": [ + { + "bbox": [ + 83, + 262, + 283, + 274 + ], + "type": "text", + "content": "3.2 Multimodal Reasoning and Mathematics" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 282, + 527, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 282, + 527, + 327 + ], + "spans": [ + { + "bbox": [ + 82, + 282, + 527, + 327 + ], + "type": "text", + "content": "To comprehensively evaluate the multimodal reasoning and mathematical capabilities of InternVL3, we conduct experiments on a series of benchmarks, including MMMU [141] for multidisciplinary reasoning, MathVista [80], MathVision [119], MathVerse [146] for mathematical reasoning, as well as DynaMath [155], WeMath [99] and LogicVista [131] for complementary evaluation on logical reasoning." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 82, + 331, + 527, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 331, + 527, + 441 + ], + "spans": [ + { + "bbox": [ + 82, + 331, + 527, + 441 + ], + "type": "text", + "content": "As shown in Table 2, InternVL3 exhibits strong performance across all tested benchmarks. Specifically, on the MMMU benchmark, InternVL3-based models consistently outperform smaller-scale competitors. For instance, with increasing model size, InternVL3-78B reaches a score over 72 on MMMU, indicating robust understanding and reasoning capability in handling abstract multidisciplinary concepts. In the mathematical domain, InternVL3 demonstrates significant gains across various benchmarks. On MathVista, InternVL3-78B records a performance close to 79.0, while on MathVision and MathVerse, the results are also competitive, evidencing the model's enhanced ability to tackle challenging mathematical problems. 
Furthermore, performance on DynaMath, WeMath, and LogicVista consistently improves with scaling. The overall score—a mean calculated across all benchmarks—shows that InternVL3 models achieve a balanced enhancement across different aspects, surpassing many of the preceding open-source methods." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 82, + 445, + 527, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 445, + 527, + 502 + ], + "spans": [ + { + "bbox": [ + 82, + 445, + 527, + 502 + ], + "type": "text", + "content": "A notable characteristic of InternVL3 is the efficiency of the best-of-N evaluation strategy [125]. When applying this method, even models with relatively smaller parameter sizes (e.g., InternVL3-1B and InternVL3-2B) exhibit substantial improvements in reasoning performance. Specifically, in the Vision-Only split of MathVerse, the best-of-8 strategy leads to increases of approximately 6.0 and 3.2 percentage points for InternVL3-38B and InternVL3-78B, respectively. This improvement underscores the effectiveness of test-time scaling." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 515, + 295, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 515, + 295, + 528 + ], + "spans": [ + { + "bbox": [ + 83, + 515, + 295, + 528 + ], + "type": "text", + "content": "3.3 OCR, Chart, and Document Understanding" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 82, + 536, + 527, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 536, + 527, + 602 + ], + "spans": [ + { + "bbox": [ + 82, + 536, + 527, + 602 + ], + "type": "text", + "content": "To assess the model's integrated vision-language understanding in tasks involving text, document, and chart comprehension, we perform a comprehensive evaluation over nine benchmarks, including AI2D [57], ChartQA [91], TextVQA [107], DocVQA [93], InfoVQA [92], OCRBench [76], SEED-2-Plus [61], CharXiv [128], and VCR [148]. 
As illustrated in Table 3, the InternVL3 series not only maintains robust performance across these benchmarks but also demonstrates competitive or superior results when compared to other open-source and closed-source counterparts." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 82, + 607, + 527, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 607, + 527, + 674 + ], + "spans": [ + { + "bbox": [ + 82, + 607, + 527, + 674 + ], + "type": "text", + "content": "At the 1B scale, InternVL3-1B achieves performance that is roughly on par with previous lower-scale models. At the 2B scale, InternVL3-2B not only improves its absolute scores—for instance, reaching 78.7/87.4 on AI2D and 88.3 on DocVQA—but also exhibits a performance edge over similarly parameterized models such as Qwen2-VL-2B [121]. Although its TextVQA performance (77.0) remains comparable to that of Qwen2-VL-2B, the enhancements in document and chart understanding suggest that the proposed native multimodal pre-training are particularly effective in tasks requiring precise visual-textual integration." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 82, + 677, + 527, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 677, + 527, + 723 + ], + "spans": [ + { + "bbox": [ + 82, + 677, + 527, + 723 + ], + "type": "text", + "content": "The benefits of the new pre-training protocol become even more pronounced at larger scales. Mid-scale models like InternVL3-8B and InternVL3-9B deliver substantial gains, with InternVL3-8B achieving 85.2/92.6 on AI2D, 92.7 on DocVQA, and VCR scores of 94.5/98.1. 
Moreover, when compared with heavyweight systems such as Qwen2-VL-72B [121] or even closed-source models like GPT-4o-20240513 [97], the high-scale variants" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 102, + 68, + 509, + 460 + ], + "blocks": [ + { + "bbox": [ + 102, + 68, + 509, + 460 + ], + "lines": [ + { + "bbox": [ + 102, + 68, + 509, + 460 + ], + "spans": [ + { + "bbox": [ + 102, + 68, + 509, + 460 + ], + "type": "table", + "html": "
ModelMMMUMathVistaMathVisionMathVerseDynaMathWeMathLogicVistaOverall
LLaVA-OV-0.5B [60]31.434.8------
InternVL2.5-1B [18]41.247.121.116.45.611.126.024.1
InternVL3-1B43.445.818.818.75.813.429.825.1
w/ VisualPRM-Bo8 [125]55.462.121.728.913.428.534.935.0
Aquila-VL-2B [44]46.959.117.917.45.015.930.627.5
Qwen2.5-VL-3B [7]51.261.221.931.213.222.940.334.6
Ovis-2B [84]45.664.117.729.410.09.934.730.2
Ovis-4B [84]49.069.621.538.518.016.935.335.5
InternVL2.5-2B [18]43.251.114.022.34.48.027.324.3
InternVL2.5-4B [18]51.864.118.427.715.221.234.233.2
InternVL3-2B48.657.021.725.314.622.436.932.4
w/ VisualPRM-Bo8 [125]57.870.526.636.721.438.540.541.7
LLaVA-OV-7B [60]47.958.618.319.39.020.933.329.6
MiniCPM-V2.6 [135]49.860.823.418.99.816.427.529.5
MiniCPM-o2.6 [135]50.973.321.735.010.425.236.036.1
Ovis-8B [84]57.471.825.942.320.427.239.440.6
Qwen2.5-VL-8B [7]55.067.825.441.121.035.244.141.4
InternVL2.5-8B [18]56.264.517.022.89.423.536.032.8
InternVL3-8B62.771.629.339.825.537.144.144.3
w/ VisualPRM-Bo8 [125]66.075.237.546.328.548.149.750.2
InternVL3-9B57.771.527.635.326.733.849.243.1
w/ VisualPRM-Bo8 [125]63.776.233.945.829.146.650.649.4
Ovis2-16B [84]60.773.730.145.826.345.047.447.0
InternVL2.5-26B [18]60.768.223.424.011.430.939.636.9
InternVL3-14B67.175.137.244.431.343.051.249.9
w/ VisualPRM-Bo8 [125]69.377.940.147.733.152.056.253.8
Cambrian-34B [116]49.753.2------
VILA-1.5-40B [71]55.149.5------
Ovis2-34B [84]66.776.131.950.127.551.949.950.6
InternVL2.5-38B [18]63.971.932.236.920.038.347.944.4
InternVL3-38B70.175.134.248.235.348.658.452.8
w/ VisualPRM-Bo8 [125]71.079.441.854.236.155.258.456.6
GPT-4o-20241120 [97]70.760.031.240.634.545.852.847.9
Claude-3.7-Sonnet [3]75.066.841.946.739.749.358.253.9
Gemini-2.0-Flash [30]72.670.443.647.842.147.452.353.7
Gemini-2.0-Pro [29]69.971.348.167.343.356.553.258.5
LLaVA-OV-72B [60]55.767.125.327.215.632.040.937.7
QvQ-72B-Preview [115]70.370.334.948.230.739.058.250.2
Qwen2.5-VL-72B [7]68.274.239.347.335.949.155.752.8
InternVL2.5-78B [18]70.072.332.239.219.239.849.046.0
InternVL3-78B72.279.043.151.035.146.155.954.6
w/ VisualPRM-Bo8 [125]72.280.540.854.237.352.457.956.5
", + "image_path": "7dfb9f75ca6b528ec382461cc9c8557cfaeffdfdd09cb987fa67c8df23797837.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 465, + 529, + 534 + ], + "lines": [ + { + "bbox": [ + 83, + 465, + 529, + 534 + ], + "spans": [ + { + "bbox": [ + 83, + 465, + 529, + 534 + ], + "type": "text", + "content": "Table 2: Comparison of multimodal reasoning and mathematical performance. MMMU [141] is a multidisciplinary reasoning benchmark. MathVista [80], MathVision [119], MathVerse [146], DynaMath [155], and WeMath [99] are mathematics benchmarks. For MathVerse, we report the performance on Vision-Only split. LogicVista [131] is a logical reasoning benchmark. Part of the results are collected from the OpenCompass leaderboard [26]. The overall score is the average score of the above benchmarks. \"w/ VisualPRM-Bo8\" denotes that the model is evaluated with Best-of-8 settings, where VisualPRM [125] serves as the critic model." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 82, + 556, + 529, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 556, + 529, + 592 + ], + "spans": [ + { + "bbox": [ + 82, + 556, + 529, + 592 + ], + "type": "text", + "content": "of InternVL3—particularly InternVL3-38B and InternVL3-78B—push the envelope further. For instance, InternVL3-78B attains a remarkable OCRBench score of 906 and VCR scores of 96.0/98.6, clearly surpassing the corresponding metrics of comparable models." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 607, + 231, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 607, + 231, + 620 + ], + "spans": [ + { + "bbox": [ + 83, + 607, + 231, + 620 + ], + "type": "text", + "content": "3.4 Multi-Image Understanding" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 628, + 527, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 628, + 527, + 685 + ], + "spans": [ + { + "bbox": [ + 82, + 628, + 527, + 685 + ], + "type": "text", + "content": "we evaluate the multi-image relation perception and understanding capabilities of InternVL3 across a suite of widely recognized benchmarks, including BLINK [39], Mantis-Eval [51], MMIU [95], MuirBench [118], MMT-Bench [137], and MIRB [153], as presented in Table 4. These benchmarks comprehensively assess skills such as cross-image reasoning and context integration, all of which are crucial for effective multimodal interaction." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 83, + 689, + 529, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 689, + 529, + 724 + ], + "spans": [ + { + "bbox": [ + 83, + 689, + 529, + 724 + ], + "type": "text", + "content": "InternVL3 consistently outperforms its earlier counterparts across different parameter scales. For instance, at the 1B scale, InternVL3-1B exhibits a modest yet consistent improvement over preceding models, achieving a BLINK score of 42.9 and an MMT-Bench score of 52.9. 
The performance gains become even more pronounced" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 85, + 69, + 526, + 491 + ], + "blocks": [ + { + "bbox": [ + 85, + 69, + 526, + 491 + ], + "lines": [ + { + "bbox": [ + 85, + 69, + 526, + 491 + ], + "spans": [ + { + "bbox": [ + 85, + 69, + 526, + 491 + ], + "type": "table", + "html": "
Model NameAI2D (w / wo M)ChartQA (test avg)TextVQA (val)DocVQA (test)InfoVQA (test)OCR BenchSEED-2 PlusCharXiv (RQ / DQ)VCR-EN-Easy (EM / Jaccard)Overall
LLaVA-OneVision-0.5B [60]57.1 / -61.4-70.041.8565----
InternVL2-1B [19]64.1 / 70.572.970.581.750.975454.318.1 / 30.721.5 / 48.454.9
InternVL2.5-1B [18]69.3 / 77.875.972.084.856.078559.019.0 / 38.491.5 / 97.068.3
InternVL3-1B69.4 / 78.375.374.181.953.779058.221.0 / 47.189.3 / 96.268.6
Qwen2-VL-2B [121]74.7 / 84.673.579.790.165.580962.4-81.5 / --
Qwen2.5-VL-3B [7]81.6 / -84.079.393.977.179767.631.3 / 58.6--
Aquila-VL-2B [44]75.0 / -76.576.485.058.377263.0-70.0 / --
InternVL2-2B [19]74.1 / 82.376.273.486.958.978460.021.0 / 40.632.9 / 59.262.0
InternVL2.5-2B [18]74.9 / 83.579.274.388.760.980460.921.3 / 49.793.2 / 97.672.1
InternVL3-2B78.7 / 87.480.277.088.366.183564.628.3 / 54.791.2 / 96.974.7
Ovis1.6-Gemma2-9B [84]84.4 / -----830----
MiniCPM-V2.6 [135]82.1 / -82.480.190.8-85265.731.0 / 57.173.9 / 85.7-
Molmo-7B-D [31]- / 93.284.181.792.272.6694----
Qwen2-VL-7B [121]83.0 / 92.183.084.394.576.586669.0-89.7 / 93.8-
Qwen2.5-VL-7B [7]83.9 / -87.384.995.782.686470.442.5/73.9--
InternVL2-8B [19]83.8 / 91.783.377.491.674.879467.531.2 / 56.137.9 / 61.569.7
InternVL2.5-8B [18]84.5 / 92.884.879.193.077.682269.732.9 / 68.692.6 / 97.479.6
InternVL3-8B85.2 / 92.686.680.292.776.888069.737.6 / 73.694.5 / 98.181.3
InternVL3-9B84.6 / 92.986.279.493.679.687768.838.0 / 72.594.2 / 97.981.3
InternVL3-14B86.0 / 93.787.380.594.183.687570.343.1 / 82.294.8 / 98.283.4
InternVL-Chat-V1.5 [19]80.7 / 89.883.880.690.972.572466.329.2 / 58.514.7 / 51.465.9
InternVL2-26B [19]84.5 / 92.584.982.392.975.982567.633.4 / 62.474.5 / 86.776.7
InternVL2.5-26B [18]86.4 / 94.487.282.494.079.885270.835.9 / 73.594.4 / 98.081.8
Qwen2.5-VL-32B [7]---94.883.4-----
Cambrian-34B [116]79.5 / -75.676.775.546.0600-27.3 / 59.779.7 / 89.3-
VILA-1.5-40B [71]69.9 / -67.273.6--460-24.0 / 38.7--
InternVL2-40B [19]86.6 / 94.586.283.093.978.783769.232.3 / 66.084.7 / 92.679.3
InternVL2.5-38B [18]87.6 / 95.188.282.795.383.684271.242.4 / 79.694.7 / 98.283.6
InternVL3-38B88.9 / 95.589.283.995.485.088671.646.4 / 87.296.1 / 98.785.5
GPT-4V [97]78.2 / 89.478.578.088.475.164553.837.1 / 79.952.0 / 65.470.0
GPT-4o-20240513 [97]84.6 / 94.285.777.492.879.273672.047.1 / 84.591.6 / 96.481.6
Claude-3-Opus [3]70.6 / 88.180.867.589.355.669444.230.2 / 71.662.0 / 77.767.3
Claude-3.5-Sonnet [3]81.2 / 94.790.874.195.274.378871.760.2 / 84.363.9 / 74.778.7
Gemini-1.5-Pro [102]79.1 / 94.487.278.893.181.0754-43.3 / 72.062.7 / 77.7-
LLaVA-OneVision-72B [60]85.6 / -83.780.591.374.9741----
NVLM-D-72B [28]85.2 / 94.286.082.192.6-853----
Molmo-72B [31]- / 96.387.383.193.581.9-----
Qwen2-VL-72B [121]88.1 / -88.385.596.584.5877--91.3 / 94.6-
Qwen2.5-VL-72B [7]88.7 / -89.583.596.487.388573.049.7 / 87.4--
InternVL2-Llama3-76B [19]87.6 / 94.888.484.494.182.083969.738.9 / 75.283.2 / 91.381.1
InternVL2.5-78B [18]89.1 / 95.788.383.495.184.185471.342.4 / 82.395.7 / 94.583.9
InternVL3-78B89.7 / 96.089.784.395.486.590671.946.0 / 85.196.0 / 98.685.8
", + "image_path": "c59cf6f8b14dce70a608cfda365f8d999fdb86411a13c8775b6106a10a119b34.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 497, + 526, + 542 + ], + "lines": [ + { + "bbox": [ + 82, + 497, + 526, + 542 + ], + "spans": [ + { + "bbox": [ + 82, + 497, + 526, + 542 + ], + "type": "text", + "content": "Table 3: Comparison of OCR, chart, and document understanding performance. We evaluate OCR-related capabilities across 9 benchmarks, including AI2D [57], ChartQA [91], TextVQA [107], DocVQA [93], InfoVQA [92], OCRBench [76], SEED-2-Plus [61], CharXiv [128], and VCR [148]. Part of results are collected from [34, 31, 3, 128, 148] and the OpenCompass leaderboard [26]." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 82, + 574, + 526, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 574, + 526, + 619 + ], + "spans": [ + { + "bbox": [ + 82, + 574, + 526, + 619 + ], + "type": "text", + "content": "at the 2B scale; InternVL3-2B attains a remarkable 65.9 on Mantis-Eval, representing an improvement of over 11 points relative to InternVL2.5-2B, and also boosts its MMT-Bench performance to 59.5. Such enhancements indicate that the advanced pre-training strategies and enhanced training datasets in InternVL3 significantly elevate its capability to capture and reason over inter-image relationships." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 82, + 624, + 527, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 624, + 527, + 723 + ], + "spans": [ + { + "bbox": [ + 82, + 624, + 527, + 723 + ], + "type": "text", + "content": "At higher scales, the trend continues. InternVL3-8B and its subsequent larger variants not only secure steady improvements on BLINK and MMT-Bench but also demonstrate substantial gains on the MIRB and MuirBench benchmarks. 
In particular, InternVL3-78B reaches a BLINK score of 66.3 and an MMT-Bench score of 73.2, positioning it as a competitive alternative to leading closed-source models like GPT-4o. These results suggest that the learning multimodal capabilities via native multimodal pre-training and the scaling of model parameters are key contributors to the elevated performance observed across diverse evaluation settings. Despite these encouraging outcomes, a noticeable performance gap between our InternVL3 and other MLLMs like Qwen2.5-VL still exists on certain benchmarks, such as MuirBench, implying that future work may benefit from further enhancements in training data curation and additional model refinements." + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 85, + 69, + 526, + 415 + ], + "blocks": [ + { + "bbox": [ + 85, + 69, + 526, + 415 + ], + "lines": [ + { + "bbox": [ + 85, + 69, + 526, + 415 + ], + "spans": [ + { + "bbox": [ + 85, + 69, + 526, + 415 + ], + "type": "table", + "html": "
Model NameBLINK (val)Mantis EvalMMIUMuir BenchMMT (val)MIRB (avg)OverallRealWorld QAMME-RW (EN)WildVision (win rate)R-Bench (dis)Overall
LLaVA-OneVision-0.5B [60]52.139.6-25.5---55.6----
InternVL2-1B [19]38.646.137.329.349.531.538.750.340.217.855.641.0
InternVL2.5-1B [18]42.051.238.529.950.335.641.357.544.243.459.051.0
InternVL3-1B42.950.239.331.252.936.142.158.246.043.860.452.1
Qwen2-VL-2B [121]44.4---55.1--62.6----
Qwen2.5-VL-3B [6]47.6--47.7---65.453.1---
InternVL2-2B [19]43.848.439.832.550.432.141.257.347.331.856.848.3
InternVL2.5-2B [18]44.054.843.540.654.536.445.660.148.844.262.253.8
InternVL3-2B50.365.943.038.859.542.950.164.353.848.867.558.6
Qwen2-VL-7B [121]53.2---64.0--70.156.5-64.0-
Qwen2.5-VL-7B [6]56.4--59.6---68.557.4---
MiniCPM-V2.6 [135]53.069.0--60.8--65.0----
InternVL2-8B [19]50.965.442.048.760.050.052.864.453.554.467.960.1
InternVL2.5-8B [18]54.867.746.751.162.352.555.970.159.162.070.165.3
InternVL3-8B55.570.146.855.065.056.858.270.862.069.874.169.2
InternVL3-9B58.670.150.451.465.458.659.170.561.363.870.366.5
InternVL3-14B60.376.050.956.270.359.362.270.764.069.869.368.5
InternVL-Chat-V1.5 [19]46.666.837.438.558.050.349.666.049.456.667.960.0
InternVL2-26B [19]56.269.642.650.660.653.755.668.358.762.270.164.8
InternVL2.5-26B [18]61.875.649.461.166.955.761.874.561.865.272.968.6
Cambrian-34B [116]-------67.844.1---
InternVL2-40B [19]57.271.447.954.466.255.258.771.861.863.273.367.5
InternVL2.5-38B [18]63.278.355.362.770.061.265.173.564.066.472.169.0
InternVL3-38B64.077.957.463.871.862.366.275.667.371.673.372.0
GPT-4V [97]54.662.7-62.364.353.1-61.4-71.865.6-
GPT-4o-20240513 [97]68.0-55.768.065.4--75.445.280.677.769.7
Claude-3.5-Sonnet [3]--53.4----60.151.6---
Gemini-1.5-Pro [102]--53.4-64.5--67.538.2---
LLaVA-OneVision-72B [60]55.477.6-54.8---71.9----
Qwen2-VL-72B [121]----71.8--77.8----
Qwen2.5-VL-72B [6]64.4--70.7---75.763.2---
InternVL2-Llama3-76B [19]56.873.744.251.267.458.258.672.263.065.874.168.8
InternVL2.5-78B [18]63.877.055.863.570.861.165.378.762.971.477.272.6
InternVL3-78B66.379.360.464.573.264.368.078.065.473.677.473.6
", + "image_path": "82fc116de0b0bf8c97c7d043c9af4f27e7a29697d82db4c93020c1d15f127367.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 421, + 529, + 468 + ], + "lines": [ + { + "bbox": [ + 83, + 421, + 529, + 468 + ], + "spans": [ + { + "bbox": [ + 83, + 421, + 529, + 468 + ], + "type": "text", + "content": "Table 4: Comparison of multi-image and real-world understanding performance. Multi-image benchmarks include BLINK [39], Mantis-Eval [51], MMIU [95], MuirBench [118], MMT-Bench [137], and MIRB [153]. Real-world benchmarks encompass RealWorldQA [27], MME-RealWorld [151], WildVision [86], and R-Bench [62]. Part of the results are sourced from the benchmark papers and the OpenCompass leaderboard [26]." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 83, + 510, + 230, + 523 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 510, + 230, + 523 + ], + "spans": [ + { + "bbox": [ + 83, + 510, + 230, + 523 + ], + "type": "text", + "content": "3.5 Real-World Comprehension" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 82, + 541, + 527, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 541, + 527, + 640 + ], + "spans": [ + { + "bbox": [ + 82, + 541, + 527, + 640 + ], + "type": "text", + "content": "We evaluate the InternVL3 series on four real-world comprehension benchmarks—RealWorldQA [27], MME-RealWorld [151], WildVision [86], and R-Bench [62]—to assess its ability to tackle realistic and complex tasks. As shown in Table 4, even the smallest variant in the InternVL3 family (InternVL3-1B) demonstrates promising performance with a RealWorldQA score of 58.2, an MME-RealWorld score of 46.0, a WildVision win rate of 43.8, and an R-Bench score of 60.4. Scaling up the model yields further enhancements across all metrics. 
Mid-sized variants such as InternVL3-8B and InternVL3-14B continue this positive trend, with InternVL3-8B reporting a RealWorldQA score of 70.8 and an R-Bench score of 74.1. These improvements highlight the effectiveness of scaling, as larger models provide more robust representations and enhanced comprehension capabilities in real-world scenarios." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 644, + 527, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 644, + 527, + 723 + ], + "spans": [ + { + "bbox": [ + 82, + 644, + 527, + 723 + ], + "type": "text", + "content": "At the higher end of the scale, the InternVL3-38B and InternVL3-78B models achieve top-tier results among the InternVL3 series. Notably, InternVL3-78B records a RealWorldQA score of 78.0, an MME-RealWorld score of 65.4, a WildVision win rate of 73.6, and an R-Bench score of 77.4. When compared with competitive models, such as GPT-4o [97]—which scores 75.4 on RealWorldQA and 80.6 on WildVision—the InternVL3 series exhibits competitive strengths. InternVL3-78B not only surpasses GPT-4o on RealWorldQA and closely matches its R-Bench performance but also considerably outperforms it on MME-RealWorld, indicating an overall robust performance on tasks demanding both perceptual precision and comprehensive understanding." 
+ } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 85, + 69, + 526, + 425 + ], + "blocks": [ + { + "bbox": [ + 85, + 69, + 526, + 425 + ], + "lines": [ + { + "bbox": [ + 85, + 69, + 526, + 425 + ], + "spans": [ + { + "bbox": [ + 85, + 69, + 526, + 425 + ], + "type": "table", + "html": "
Model NameMME (sum)MMB (EN / CN)MMBv1.1 (EN)MMVet (turbo)MMVet2 (0613)MMStarOverallHallBench (avg)MMHal (score)CRPE (relation)POPE (avg)Overall
LLaVA-OneVision-0.5B [60]1438.061.6 / 55.559.632.2-37.7-27.9----
InternVL2-1B [19]1794.465.4 / 60.761.632.736.145.751.734.02.2557.587.345.3
InternVL2.5-1B [18]1950.570.7 / 66.368.448.843.250.158.939.02.4960.989.948.1
InternVL3-1B1934.472.6 / 67.969.959.547.551.561.941.42.5964.090.749.7
Qwen2-VL-2B [121]1872.074.9 / 73.572.249.5-48.0-41.7----
Qwen2.5-VL-3B [6]215779.1 / 78.177.461.8-55.9-46.3-73.6--
InternVL2-2B [19]1876.873.2 / 70.970.239.539.650.158.037.92.5266.388.348.8
InternVL2.5-2B [18]2138.274.7 / 71.972.260.852.353.765.342.62.9470.290.651.6
InternVL3-2B2221.281.1 / 78.478.662.253.960.769.842.53.2671.589.651.7
Qwen2-VL-7B [121]2326.883.0 / 80.580.762.0-60.7-50.63.4074.488.154.1
Qwen2.5-VL-7B [6]234783.5 / 83.482.667.1-63.9-52.9-76.4--
MiniCPM-V2.6 [135]2348.481.5 / 79.378.060.0-57.5-48.13.6075.287.353.6
InternVL2-8B [19]2210.381.7 / 81.279.554.252.362.069.245.23.3375.886.952.8
InternVL2.5-8B [18]2344.184.6 / 82.683.262.858.162.873.250.13.6578.490.655.7
InternVL3-8B2415.483.4 / 82.281.781.366.368.277.749.93.6176.391.155.2
InternVL3-9B2372.883.4 / 82.281.776.265.466.376.351.23.4775.090.455.0
InternVL3-14B2478.385.6 / 84.183.580.268.468.879.055.13.4977.390.256.5
InternVL-Chat-V1.5 [19]2194.282.2 / 82.080.361.551.557.369.750.33.1175.488.454.3
InternVL2-26B [19]2260.783.4 / 82.081.562.157.261.271.850.73.5575.688.054.5
InternVL2.5-26B [18]2373.385.4 / 85.584.265.060.866.575.255.03.7079.190.657.1
Cambrian-34B [116]-80.4 / 79.278.353.2-54.2-41.6----
InternVL2-40B [19]2307.586.8 / 86.585.165.563.865.475.756.93.7577.688.456.7
InternVL2.5-38B [18]2455.886.5 / 86.385.568.862.167.977.056.83.7178.390.757.4
InternVL3-38B2523.687.6 / 86.886.983.969.671.581.557.13.7777.190.657.1
GPT-4V [97]1926.681.0 / 80.280.067.566.356.070.746.5----
GPT-4o-20240513 [97]-83.4 / 82.183.169.171.064.7-55.04.0076.686.955.6
Claude-3-Opus [3]1586.863.3 / 59.260.151.755.845.755.537.8----
Claude-3.5-Sonnet [3]-82.6 / 83.580.970.171.865.1-55.5----
Gemini-1.5-Pro [102]-73.9 / 73.874.664.066.959.1-45.6----
LLaVA-OneVision-72B [60]2261.085.8 / 85.385.060.6-65.8-49.0----
Qwen2-VL-72B [121]2482.786.5 / 86.685.974.066.968.378.758.1----
Qwen2.5-VL-72B [6]2448.088.6 / 87.988.476.2-70.8-55.2-79.2--
InternVL2-Llama3-76B [19]2414.786.5 / 86.385.565.768.467.477.255.23.8377.689.056.4
InternVL2.5-78B [18]2494.588.3 / 88.587.472.365.569.579.257.43.8978.890.857.7
InternVL3-78B2549.889.0 / 88.787.781.370.072.582.059.13.8579.290.358.1
", + "image_path": "4dc344ceba0082d124ada389f229dd7aa3fbe789254ac6aab91cf6b16f3c9dcf.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 430, + 526, + 475 + ], + "lines": [ + { + "bbox": [ + 82, + 430, + 526, + 475 + ], + "spans": [ + { + "bbox": [ + 82, + 430, + 526, + 475 + ], + "type": "text", + "content": "Table 5: Comparison of comprehensive multimodal understanding and hallucination performance. Comprehensive multimodal benchmarks include MME [37], MMBench series [75], MMVet series [138, 139], and MMStar [13]. Hallucination benchmarks encompass HallusionBench [45], MMHal [111], CRPE [126], and POPE [67]. Part of the results are sourced from the benchmark papers and the OpenCompass leaderboard [26]." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 83, + 528, + 277, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 528, + 277, + 540 + ], + "spans": [ + { + "bbox": [ + 83, + 528, + 277, + 540 + ], + "type": "text", + "content": "3.6 Comprehensive Multimodal Evaluation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 82, + 563, + 527, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 563, + 527, + 640 + ], + "spans": [ + { + "bbox": [ + 82, + 563, + 527, + 640 + ], + "type": "text", + "content": "The comprehensive multimodal evaluation is based on established benchmarks including MME [37], MMBench (evaluating both English and Chinese tasks) [75], MMBench v1.1 (English) [75], MMVet [138], MMVet v2 [139], and MMStar [13], as summarized in Table 5. Specifically, InternVL3-1B achieves an MMBench score of 72.6/67.9 (English/Chinese) and improves the MMBench v1.1 score to 69.9, compared to the InternVL2.5-1B baseline (70.7/66.3 and 68.4, respectively). 
The improvements become more pronounced at the 2B scale, where InternVL3-2B records an MME of 2221.2 and reaches an MMBench performance of 81.1/78.4, along with an MMBench v1.1 score of 78.6." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 644, + 529, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 644, + 529, + 723 + ], + "spans": [ + { + "bbox": [ + 82, + 644, + 529, + 723 + ], + "type": "text", + "content": "At larger scales, InternVL3 models consistently demonstrate superior performance. For example, the InternVL3-8B model achieves an MME of 2415.4, while the InternVL3-38B and InternVL3-78B models record MME scores of 2523.6 and 2549.8, respectively. The corresponding MMBench and MMBench v1.1 scores also show steady improvements, with InternVL3-78B attaining 89.0/88.7 for English/Chinese and 87.7 for English-only tasks. When compared with other competitive models, such as Qwen2-VL-72B and Qwen2.5-VL-72B, the InternVL3 series—especially the 78B variant—offers a consistent performance advantage on the multimodal understanding benchmarks." + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 97, + 69, + 511, + 319 + ], + "blocks": [ + { + "bbox": [ + 97, + 69, + 511, + 319 + ], + "lines": [ + { + "bbox": [ + 97, + 69, + 511, + 319 + ], + "spans": [ + { + "bbox": [ + 97, + 69, + 511, + 319 + ], + "type": "table", + "html": "
Model NameRefCOCORefCOCO+RefCOCOg
valtest-Atest-Bvaltest-Atest-Bvaltest
Grounding-DINO-L [74]90.693.288.282.889.075.986.187.086.6
UNINEXT-H [133]92.694.391.585.289.679.888.789.488.9
ONE-PEACE [122]92.694.289.388.892.283.289.289.389.8
Qwen2.5-VL-3B [6]89.191.784.082.488.074.185.285.785.0
InternVL3-1B85.890.181.776.684.169.282.882.681.6
InternVL3-2B89.892.686.484.089.276.587.687.286.7
Shikra-7B [12]87.090.680.281.687.472.182.382.282.9
Ferret-v2-13B [144]92.695.088.987.492.181.489.490.089.6
CogVLM-Grounding [123]92.894.889.088.792.983.489.890.890.3
MM1.5 [143]-92.586.7-88.777.8-87.1-
Qwen2-VL-7B [121]91.793.687.385.890.579.587.387.887.9
Qwen2.5-VL-7B [6]90.092.585.484.289.176.987.287.286.6
TextHawk2 [140]91.993.087.686.290.080.488.288.188.2
InternVL2-8B [19]87.191.180.779.887.971.482.782.782.9
InternVL2.5-8B [18]90.394.585.985.291.578.886.787.687.6
InternVL3-8B92.594.688.088.292.581.889.690.089.6
InternVL3-9B91.893.286.686.491.079.988.088.588.2
InternVL3-14B92.094.487.887.492.181.588.689.389.1
Qwen2-VL-72B [121]93.295.390.790.193.885.689.990.491.1
Qwen2.5-VL-72B [6]92.794.689.788.992.283.789.990.390.3
InternVL2-Llama3-76B [19]92.294.888.488.893.182.889.590.390.0
InternVL2.5-78B [18]93.795.692.590.494.786.992.792.292.3
InternVL3-38B93.295.190.289.893.285.291.491.591.2
InternVL3-78B93.495.490.390.193.885.391.591.591.4
", + "image_path": "4176fc7555918689b16ab5d68173c5f141920017a405d4eef3a48bf0f90258c7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 324, + 526, + 348 + ], + "lines": [ + { + "bbox": [ + 83, + 324, + 526, + 348 + ], + "spans": [ + { + "bbox": [ + 83, + 324, + 526, + 348 + ], + "type": "text", + "content": "Table 6: Comparison of visual grounding performance. We evaluate InternVL's visual grounding capability on RefCOCO, RefCOCO+, and RefCOCOg datasets [56, 88]. Parts of the results are collected from [121]." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 83, + 369, + 269, + 381 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 369, + 269, + 381 + ], + "spans": [ + { + "bbox": [ + 83, + 369, + 269, + 381 + ], + "type": "text", + "content": "3.7 Multimodal Hallucination Evaluation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 82, + 390, + 527, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 390, + 527, + 479 + ], + "spans": [ + { + "bbox": [ + 82, + 390, + 527, + 479 + ], + "type": "text", + "content": "We evaluate InternVL's propensity for hallucinations on four established benchmarks—HallusionBench [45], MMHal-Bench [111], CRPE [126], and POPE [67]—as detailed in Table 5. In comparison with previous InternVL series, the new InternVL3 models demonstrate overall competitive performance across varying scales, while providing consistent improvements in handling multimodal hallucination challenges. In the small-parameter regime, InternVL3-1B attains a HallusionBench score of 41.4, representing an appreciable gain over the InternVL2.5-1B baseline, which scored 39.0. Similarly, the 2B variant of InternVL3 shows a comparable HallusionBench performance (42.5) to its InternVL2.5 counterpart (42.6), while registering a modest improvement in CRPE performance (71.5 vs. 70.2)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 483, + 527, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 483, + 527, + 551 + ], + "spans": [ + { + "bbox": [ + 82, + 483, + 527, + 551 + ], + "type": "text", + "content": "In the large-scale setting, InternVL3-38B and InternVL3-78B are particularly noteworthy. InternVL3-38B obtains a HallusionBench score of 57.1, while InternVL3-78B reaches 59.1, accompanied by a CRPE improvement to 79.2. These figures position the InternVL3 series as competitive with leading closed- and open-source models such as GPT-4o and the Qwen2.5-VL series. Despite these advancements, minor declines on certain benchmarks, such as MMHal, indicate that although the InternVL3 series has made overall progress, optimizing data and training strategies to achieve more consistent improvements remains an important direction for future work." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 83, + 564, + 186, + 577 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 564, + 186, + 577 + ], + "spans": [ + { + "bbox": [ + 83, + 564, + 186, + 577 + ], + "type": "text", + "content": "3.8 Visual Grounding" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 82, + 585, + 526, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 585, + 526, + 631 + ], + "spans": [ + { + "bbox": [ + 82, + 585, + 526, + 631 + ], + "type": "text", + "content": "We evaluate InternVL's visual grounding capability on the RefCOCO [56], RefCOCO+[56], and RefCOCOg[88] datasets, where the model is tasked with accurately localizing target objects in images from given textual descriptions. Table 6 shows a comprehensive comparison across various models, including several specialized grounding models as well as multiple MLLLMs." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 82, + 634, + 527, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 634, + 527, + 723 + ], + "spans": [ + { + "bbox": [ + 82, + 634, + 527, + 723 + ], + "type": "text", + "content": "Among the smaller-scale models, we observe that while Qwen2.5-VL-3B achieves an average score of 85.0, the InternVL3-1B and InternVL3-2B models yield average scores of 81.6 and 86.7, respectively. Notably, when scaling up, the InternVL3 series exhibits promising improvements. InternVL3-8B, InternVL3-9B, and InternVL3-14B yield average scores around 88.2–89.6, reflecting a consistent trend of performance gains as the model size increases. However, when reaching larger scales, the performance gains appear to plateau. For instance, InternVL2.5-78B reaches an average score of 92.3, and InternVL3-78B only shows a score of 91.4. We speculate that this is because InternVL3's training data expansion does not include additional grounding-specific data and the relative reduction in grounding-targeted data could have restricted the localization capabilities." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 85, + 70, + 526, + 376 + ], + "blocks": [ + { + "bbox": [ + 85, + 70, + 526, + 376 + ], + "lines": [ + { + "bbox": [ + 85, + 70, + 526, + 376 + ], + "spans": [ + { + "bbox": [ + 85, + 70, + 526, + 376 + ], + "type": "table", + "html": "
Model NameMMMBMultilingual MMBenchMTVQA (avg)Overall
enzhptartrruenzhptartrru
InternVL2-1B [19]73.267.455.553.543.855.267.961.250.843.331.852.712.640.7
InternVL2.5-1B [18]78.870.261.555.045.361.172.564.757.043.037.853.221.446.0
InternVL3-1B79.470.162.358.047.661.972.666.262.348.039.560.322.247.9
Qwen2-VL-2B [121]78.374.272.668.361.872.872.171.169.961.154.469.320.052.6
Qwen2.5-VL-3B [6]------------24.8-
InternVL2-2B [19]79.471.654.043.546.448.173.869.651.429.831.342.310.939.3
InternVL2.5-2B [18]81.474.458.248.346.453.276.571.655.937.333.944.821.845.2
InternVL3-2B81.978.375.468.662.974.681.377.875.966.459.570.726.757.4
mPLUG-Owl2 [136]67.361.059.745.845.462.666.259.458.237.947.760.4--
Qwen2-VL-7B [121]83.982.481.279.074.782.481.881.679.175.674.579.325.661.6
Qwen2.5-VL-7B [6]------------29.2-
InternVL2-8B [19]83.481.576.166.369.275.782.981.876.060.566.074.420.956.6
InternVL2.5-8B [18]84.383.178.669.371.579.583.883.279.464.367.877.327.660.4
InternVL3-8B85.183.182.581.676.283.485.585.683.279.275.982.630.264.7
InternVL3-9B84.883.780.669.968.580.886.585.279.164.368.379.127.160.7
InternVL3-14B85.784.783.183.779.383.686.785.883.281.180.783.831.666.2
InternVL-Chat-V1.5 [19]82.680.876.365.268.674.081.180.276.956.266.771.020.555.7
InternVL2-26B [19]83.881.778.068.869.376.382.781.877.861.969.674.417.756.2
InternVL2.5-26B [18]86.283.881.673.373.782.886.185.580.767.575.079.628.562.6
InternVL2-40B [19]85.384.181.170.374.281.486.285.882.864.074.281.820.659.7
InternVL2.5-38B [18]86.485.184.184.382.884.987.588.685.384.584.085.931.767.4
InternVL3-38B86.785.684.584.882.685.189.089.387.184.684.387.432.468.1
GPT-4V [97]75.074.271.573.569.073.177.674.472.572.370.574.822.056.1
GPT-4o [97]------------27.8-
Gemini-1.0-Pro [114]75.071.970.669.969.672.773.672.170.361.169.870.5--
Qwen2-VL-72B [121]86.885.385.284.884.285.386.987.285.883.584.485.330.967.2
Qwen2.5-VL-72B [6]------------31.7-
InternVL2-Llama3-76B [19]85.385.182.882.883.083.787.887.385.983.185.085.722.063.9
InternVL2.5-78B [18]86.385.685.184.883.185.490.089.787.483.384.986.331.968.0
InternVL3-78B87.286.685.586.584.686.189.490.388.786.186.688.132.568.9
", + "image_path": "38f44bcbaf1d9cf8b391869abd3803e6dde210f593578e499d64e4125c709616.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 383, + 526, + 417 + ], + "lines": [ + { + "bbox": [ + 83, + 383, + 526, + 417 + ], + "spans": [ + { + "bbox": [ + 83, + 383, + 526, + 417 + ], + "type": "text", + "content": "Table 7: Comparison of multimodal multilingual performance. We evaluate multilingual capabilities across 3 benchmarks, including MMMB [109], Multilingual MMBench [109] and MTVQA [113]. The languages evaluated are English (en), Chinese (zh), Portuguese (pt), Arabic (ar), Turkish (tr), and Russian (ru)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 83, + 445, + 281, + 457 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 445, + 281, + 457 + ], + "spans": [ + { + "bbox": [ + 83, + 445, + 281, + 457 + ], + "type": "text", + "content": "3.9 Multimodal Multilingual Understanding" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 82, + 468, + 527, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 468, + 527, + 525 + ], + "spans": [ + { + "bbox": [ + 82, + 468, + 527, + 525 + ], + "type": "text", + "content": "We assess InternVL's multimodal multilingual understanding capabilities using benchmarks—MMMB, Multilingual MMBench [109], and MTVQA [113]—as shown in Table 7. The InternVL3 series demonstrates consistent improvements in multilingual performance compared to previous predecessors. For example, the lightweight InternVL3-1B already shows a modest improvement over InternVL2.5-1B, while the larger-scale variants, such as InternVL3-38B and InternVL3-78B, achieve significantly higher average scores across all three benchmarks." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 529, + 527, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 529, + 527, + 585 + ], + "spans": [ + { + "bbox": [ + 82, + 529, + 527, + 585 + ], + "type": "text", + "content": "Comparisons with other leading models further highlight the effectiveness of the InternVL3 series. Notably, the InternVL3 variants achieve performance that is competitive with or superior to models such as Qwen2-VL-72B [121] and Qwen2.5-VL-72B [6]. Overall, the enhanced performance of the InternVL3 series across MMMB, Multilingual MMBench, and MTVQA underscores the promise of our approach in advancing global multimodal applications." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 83, + 605, + 205, + 617 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 605, + 205, + 617 + ], + "spans": [ + { + "bbox": [ + 83, + 605, + 205, + 617 + ], + "type": "text", + "content": "3.10 Video Understanding" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 82, + 628, + 527, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 628, + 527, + 673 + ], + "spans": [ + { + "bbox": [ + 82, + 628, + 527, + 673 + ], + "type": "text", + "content": "Video understanding is essential for evaluating how well MLLMs capture temporal and multimodal cues in complex video content. In this work, we assess the InternVL3 series on six established benchmarks—Video-MME [38], MVBench [65], MMBench-Video [35], MLVU [154], LongVideoBench [129], and CG-Bench [2], as detailed in Table 8." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 82, + 677, + 526, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 677, + 526, + 723 + ], + "spans": [ + { + "bbox": [ + 82, + 677, + 526, + 723 + ], + "type": "text", + "content": "Overall, the InternVL3 models demonstrate clear performance improvements and a strong scalability trend over their predecessors. 
As the model capacity increases, the performance gains become more pronounced. For instance, InternVL3-2B records higher Video-MME scores (58.9/61.4) and improved MVBench and MLVU performance compared to the earlier 2B variants." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 86, + 69, + 523, + 445 + ], + "blocks": [ + { + "bbox": [ + 86, + 69, + 523, + 445 + ], + "lines": [ + { + "bbox": [ + 86, + 69, + 523, + 445 + ], + "spans": [ + { + "bbox": [ + 86, + 69, + 523, + 445 + ], + "type": "table", + "html": "
Model NameVideo-MME (wo / w sub)MVBenchMMBench-Video (val)MLVU (M-Avg)LongVideoBench (val total)CG-Bench (long / clue acc.)Overall
InternVL2-1B [19]42.9 / 45.457.51.1451.643.3--
InternVL2.5-1B [18]50.3 / 52.364.31.3657.347.9--
InternVL3-1B51.0 / 53.063.11.353.048.124.8 / 39.146.9
Qwen2-VL-2B [121]55.6 / 60.463.2-----
Qwen2.5-VL-3B [7]61.5 / 67.667.01.6368.243.3--
InternVL2-2B [19]46.2 / 49.160.21.3054.346.0--
InternVL2.5-2B [18]51.9 / 54.168.81.4461.452.0--
InternVL3-2B58.9 / 61.470.41.4264.255.430.8 / 50.754.9
VideoChat2-HD [64]45.3 / 55.762.31.2247.9---
MiniCPM-V-2.6 [135]60.9 / 63.6-1.70-54.9--
LLaVA-OneVision-7B [60]58.2 / -56.7-----
Qwen2-VL-7B [121]63.3 / 69.067.01.44-55.6--
Qwen2.5-VL-7B [7]65.1 / 71.669.61.7970.245.3--
InternVL2-8B [19]56.3 / 59.365.81.5764.054.6--
InternVL2.5-8B [18]64.2 / 66.972.01.6868.960.0--
InternVL3-8B66.3 / 68.975.41.6971.458.838.6 / 55.261.4
InternVL3-9B66.7 / 68.974.31.6970.862.541.1 / 58.062.3
InternVL3-14B70.4 / 73.076.61.7373.363.944.1 / 60.664.9
InternVL2-26B [19]57.0 / 60.267.51.6764.256.1--
InternVL2.5-26B66.9 / 69.275.21.8672.359.9--
Oryx-1.5-32B [78]67.3 / 74.970.11.5272.3---
Qwen2.5-VL-32B [7]70.5 / 77.9-1.93----
VILA-1.5-40B [71]60.1 / 61.1-1.6156.7---
InternVL2-40B [19]66.1 / 68.672.01.7871.060.6--
InternVL2.5-38B [18]70.7 / 73.174.41.8275.363.3--
InternVL3-38B72.7 / 75.076.91.8177.867.346.9 / 62.867.5
GPT-4V/4T [1]59.9 / 63.343.71.5349.259.1--
GPT-4o-20240513 [97]71.9 / 77.2-1.6364.666.7--
GPT-4o-20240806 [97]--1.87--41.8 / 58.3-
Gemini-1.5-Pro [102]75.0 / 81.3-1.30-64.040.1 / 56.4-
VideoLLaMA2-72B [23]61.4 / 63.162.0-----
LLaVA-OneVision-72B [60]66.2 / 69.559.4-66.461.3--
Qwen2-VL-72B [121]71.2 / 77.873.61.70--41.3 / 56.2-
Qwen2.5-VL-72B [7]73.3 / 79.170.42.0274.660.7--
InternVL2-Llama3-76B [19]64.7 / 67.869.61.7169.961.1--
InternVL2.5-78B [18]72.1 / 74.076.41.9775.763.642.2 / 58.566.0
InternVL3-78B72.7 / 75.778.71.8179.565.748.4 / 65.368.3
", + "image_path": "63b137b26d089da6496b7caf50f6a13a7945a49735feef9c70d2644edfc567f2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 449, + 526, + 495 + ], + "lines": [ + { + "bbox": [ + 82, + 449, + 526, + 495 + ], + "spans": [ + { + "bbox": [ + 82, + 449, + 526, + 495 + ], + "type": "text", + "content": "Table 8: Comparison of video understanding performance. We evaluate InternVL's video understanding capabilities across 6 benchmarks. For Video-MME [38], MMBench-Video [35], MLVU [154], and LongVideoBench [129], we test with four different settings: 16, 32, 48, and 64 frames, and report the maximum results. For MVBench [65], we conduct testing using 16 frames. For CG-Bench [2], we use 32 frames." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 82, + 516, + 526, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 516, + 526, + 572 + ], + "spans": [ + { + "bbox": [ + 82, + 516, + 526, + 572 + ], + "type": "text", + "content": "The scaling behavior of the InternVL3 series is further evident in the larger models. InternVL3-14B attains a Video-MME score of 70.4/73.0, while InternVL3-38B and InternVL3-78B push these metrics even higher, reaching scores of 72.7/75.0 and 72.7/75.7, respectively. Additionally, the inclusion of CG-Bench evaluations for the InternVL3 series provides further insight into long-range video reasoning, with performance steadily improving as model size increases—for example, InternVL3-78B attains 48.4/65.3 on CG-Bench." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 82, + 576, + 526, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 576, + 526, + 643 + ], + "spans": [ + { + "bbox": [ + 82, + 576, + 526, + 643 + ], + "type": "text", + "content": "When compared with other open-source models, the InternVL3 series demonstrates competitive advantages. 
For instance, while Qwen2.5-VL models achieve impressive results (with Qwen2.5-VL-72B scoring 73.3/79.1 on Video-MME), the InternVL3 series tends to outperform them in other metrics, such as MVBench and MLVU. Similarly, while closed-source systems like Gemini-1.5-Pro sometimes yield superior results on select benchmarks (e.g., Video-MME), the overall performance of InternVL3, especially at larger scales, is highly competitive." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 83, + 657, + 183, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 657, + 183, + 669 + ], + "spans": [ + { + "bbox": [ + 83, + 657, + 183, + 669 + ], + "type": "text", + "content": "3.11 GUI Grounding" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 82, + 677, + 527, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 677, + 527, + 723 + ], + "spans": [ + { + "bbox": [ + 82, + 677, + 527, + 723 + ], + "type": "text", + "content": "GUI grounding requires precise localization and understanding of interface elements, which is critical for applications like automated UI testing and assistive technologies. In Table 9, we report the performance on GUI grounding benchmarks, comparing InternVL3 with state-of-the-art multimodal and GUI-specific models. The results demonstrate that InternVL3 achieves competitive performance across different scales. 
On" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 86, + 71, + 523, + 114 + ], + "blocks": [ + { + "bbox": [ + 86, + 71, + 523, + 114 + ], + "lines": [ + { + "bbox": [ + 86, + 71, + 523, + 114 + ], + "spans": [ + { + "bbox": [ + 86, + 71, + 523, + 114 + ], + "type": "table", + "html": "
MethodGPT-4oGemini 2.0ClaudeAguvis-72BQwen2.5-VL-72BUI-TARS-72BInternVL3-8B-38B-72B
ScreenSpot18.184.083.089.287.188.479.585.688.7
ScreenSpot-V2-----90.381.488.390.9
", + "image_path": "e5d9a7d6e77380d50286b1398b5cb57b706a8a9a17883f62629000da5c604ef9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 85, + 141, + 523, + 258 + ], + "blocks": [ + { + "bbox": [ + 121, + 118, + 488, + 131 + ], + "lines": [ + { + "bbox": [ + 121, + 118, + 488, + 131 + ], + "spans": [ + { + "bbox": [ + 121, + 118, + 488, + 131 + ], + "type": "text", + "content": "Table 9: Performance of InternVL3 and other models on GUI grounding benchmarks." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 85, + 141, + 523, + 258 + ], + "lines": [ + { + "bbox": [ + 85, + 141, + 523, + 258 + ], + "spans": [ + { + "bbox": [ + 85, + 141, + 523, + 258 + ], + "type": "table", + "html": "
Model NameObj.countAbs.Dist.Obj.sizeRoom SizeRel.Dist.Rel.Dir.Route PlanAppr.OrderOverall
GPT-4o [97]46.25.343.838.237.041.331.528.534.0
Gemini-1.5 Pro [102]56.230.964.143.651.346.336.034.645.4
VILA-1.5-8B [71]17.421.850.318.832.134.831.024.828.9
LongVA-7B [145]38.016.638.922.233.143.325.415.729.2
LLaVA-NeXT-Video-7B [150]48.514.047.824.243.542.434.030.635.6
LLaVA-OneVision-7B [60]47.720.247.412.342.535.229.424.432.4
InternVL3-8B68.139.048.433.648.336.427.335.442.1
InternVL3-38B71.750.246.141.753.538.628.960.748.9
LLaVA-NeXT-Video-72B [150]48.922.857.435.342.436.735.048.640.9
LLaVA-OneVision-72B [60]43.523.957.637.542.539.932.544.640.2
InternVL3-78B71.253.744.439.555.939.528.954.548.4
", + "image_path": "64d16e3afefe4dacafd737d1aa038b03d98fb9ed727577c0deda1cb366e1d542.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 155, + 263, + 454, + 275 + ], + "lines": [ + { + "bbox": [ + 155, + 263, + 454, + 275 + ], + "spans": [ + { + "bbox": [ + 155, + 263, + 454, + 275 + ], + "type": "text", + "content": "Table 10: Performance of InternVL3 and other models on VSI-Bench." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "spans": [ + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "text", + "content": "ScreenSpot [22], InternVL3-72B achieves " + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "inline_equation", + "content": "88.7\\%" + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "text", + "content": " accuracy, slightly outperforming UI-TARS-72B [100] " + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "inline_equation", + "content": "(88.4\\%)" + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "text", + "content": " and Qwen2.5-VL-72B " + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "inline_equation", + "content": "(87.1\\%)" + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "text", + "content": ", while Aguvis-72B [132] leads with " + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "inline_equation", + "content": "89.2\\%" + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "text", + "content": ". 
Notably, InternVL3-38B " + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "inline_equation", + "content": "(85.6\\%)" + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "text", + "content": " surpasses GPT-4o " + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "inline_equation", + "content": "(18.1\\%)" + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "text", + "content": " and Gemini 2.0 " + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "inline_equation", + "content": "(84.0\\%)" + }, + { + "bbox": [ + 82, + 297, + 527, + 332 + ], + "type": "text", + "content": " by a significant margin." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 82, + 335, + 527, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 335, + 527, + 425 + ], + "spans": [ + { + "bbox": [ + 82, + 335, + 527, + 425 + ], + "type": "text", + "content": "For the more challenging ScreenSpot-V2 [130] benchmark, InternVL3 exhibits strong scaling behavior: InternVL3-72B achieves " + }, + { + "bbox": [ + 82, + 335, + 527, + 425 + ], + "type": "inline_equation", + "content": "90.9\\%" + }, + { + "bbox": [ + 82, + 335, + 527, + 425 + ], + "type": "text", + "content": ", outperforming UI-TARS-72B " + }, + { + "bbox": [ + 82, + 335, + 527, + 425 + ], + "type": "inline_equation", + "content": "(90.3\\%)" + }, + { + "bbox": [ + 82, + 335, + 527, + 425 + ], + "type": "text", + "content": ". The 8B variant " + }, + { + "bbox": [ + 82, + 335, + 527, + 425 + ], + "type": "inline_equation", + "content": "(81.4\\%)" + }, + { + "bbox": [ + 82, + 335, + 527, + 425 + ], + "type": "text", + "content": " already surpasses UI-TARS-72B, while the 38B model " + }, + { + "bbox": [ + 82, + 335, + 527, + 425 + ], + "type": "inline_equation", + "content": "(88.3\\%)" + }, + { + "bbox": [ + 82, + 335, + 527, + 425 + ], + "type": "text", + "content": " further closes the gap to the 72B version. 
These results highlight InternVL3's robustness in GUI understanding tasks, particularly in handling complex screen layouts and dynamic interfaces. The performance improvements with model scale suggest that larger architectures better capture the fine-grained visual-textual alignments required for precise GUI grounding. The superior performance of the InternVL3 models highlights their robustness in interpreting complex visual layouts. Future work will explore extending these capabilities to more dynamic and interactive GUI environments." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 83, + 437, + 192, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 437, + 192, + 449 + ], + "spans": [ + { + "bbox": [ + 83, + 437, + 192, + 449 + ], + "type": "text", + "content": "3.12 Spatial Reasoning" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 82, + 457, + 527, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 457, + 527, + 536 + ], + "spans": [ + { + "bbox": [ + 82, + 457, + 527, + 536 + ], + "type": "text", + "content": "Spatial reasoning involves constructing a mental representation of a three-dimensional environment from visual inputs—a capability that is vital for applications such as autonomous driving. Table 10 reports the performance results on the Visual-Spatial Intelligence Benchmark (VSI-Bench) [134], where InternVL3 is compared against other state-of-the-art MLLMs. The results clearly indicate that InternVL3 outperforms its competitors in spatial reasoning tasks. In particular, the InternVL3-8B variant achieves a score of 42.1, leading all open-source MLLMs in the benchmark. Moreover, the InternVL3-38B and InternVL3-78B variants score 48.9 and 48.4, respectively—both superior to proprietary models such as GPT-4o, Gemini-1.5 Flash, and Gemini-1.5 Pro." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 82, + 539, + 527, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 539, + 527, + 596 + ], + "spans": [ + { + "bbox": [ + 82, + 539, + 527, + 596 + ], + "type": "text", + "content": "Furthermore, InternVL3 exhibits exceptional performance in several sub-category tasks within the benchmark. It attains a score of 71.2 in object counting, 53.7 in absolute distance estimation, 55.9 in relative distance estimation, and 54.5 in appearance order prediction, demonstrating its robust spatial reasoning capabilities. These promising results underscore the potential of InternVL3 for advancing 3D scene understanding, and future work will explore its integration into various downstream applications." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 608, + 265, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 608, + 265, + 620 + ], + "spans": [ + { + "bbox": [ + 83, + 608, + 265, + 620 + ], + "type": "text", + "content": "3.13 Evaluation on Language Capability" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 82, + 628, + 527, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 628, + 527, + 686 + ], + "spans": [ + { + "bbox": [ + 82, + 628, + 527, + 686 + ], + "type": "text", + "content": "Table 11 presents the performance evaluation of language capabilities across a diverse array of benchmarks. These benchmarks cover comprehensive assessments in general knowledge, linguistic understanding, reasoning, mathematics, and coding tasks, such as MMLU [46], CMMLU [63], C-Eval [48], GAOKAO-Bench [149], TriviaQA [52], NaturalQuestions [58, 110], RACE [59], WinoGrande [103], HellaSwag [142], BigBench Hard [112], GSM8K-Test [25], MATH [47], TheoremQA [17], HumanEval [14], MBPP [4], and MBPP-CN [4]." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 82, + 689, + 527, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 689, + 527, + 723 + ], + "spans": [ + { + "bbox": [ + 82, + 689, + 527, + 723 + ], + "type": "text", + "content": "In particular, the experiments conducted compare the performance of Qwen2.5 chat models against corresponding InternVL3 variants. Both model series share the same pre-trained Qwen2.5 base model as their initialization. After undergoing native multimodal pre-training followed by additional post-training, the In" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 68, + 501, + 340 + ], + "blocks": [ + { + "bbox": [ + 107, + 68, + 501, + 340 + ], + "lines": [ + { + "bbox": [ + 107, + 68, + 501, + 340 + ], + "spans": [ + { + "bbox": [ + 107, + 68, + 501, + 340 + ], + "type": "table", + "html": "
DatasetVersionQwen2.5-0.5B ChatQwen2.5-1.5B ChatQwen2.5-7B ChatQwen2.5-14B ChatQwen2.5-32B ChatQwen2.5-72B Chat
InterVL3-1BInterVL3-2BInterVL3-8BInterVL3-14BInterVL3-38BInterVL3-78B
MMLU4d595a46.449.861.864.874.277.379.582.183.385.484.486.9
CMMLUc1336547.256.762.972.278.884.482.685.885.888.787.489.9
C-Eval2daf2453.559.066.273.377.884.581.485.686.589.288.189.5
GAOKAO4c31db30.946.653.767.781.389.586.991.290.893.591.093.1
TriviaQA2121ce24.221.539.841.255.851.565.167.465.870.174.074.7
NaturalQuestions3dceal8.28.515.215.917.928.219.731.419.731.023.839.0
C38c358f35.266.381.284.790.895.192.196.392.397.496.197.6
RACE-High69ee4f51.568.876.084.686.890.889.693.091.594.291.794.2
WinoGrandeb3677047.252.956.561.971.578.179.184.383.886.783.987.8
HellaSwage4271039.347.062.073.885.490.290.593.092.195.592.795.6
BBH5b92b021.534.539.752.065.777.473.082.585.587.785.485.2
GSM8K1d7fe439.047.261.672.580.183.182.488.484.789.788.290.5
MATH39342427.832.749.357.372.672.273.776.381.172.281.478.9
TheoremQA6f0af812.312.914.415.620.125.518.524.121.918.922.930.4
HumanEval8e312c27.439.051.862.882.378.181.178.189.087.887.282.3
MBPPa447ff38.547.551.460.774.369.376.775.183.777.486.876.7
MBPP-CN9114d519.630.634.445.864.464.475.467.277.875.476.076.0
Overall-33.542.451.659.269.472.973.476.677.478.978.980.5
", + "image_path": "f5286d02710ce84555dfeebe50c2c99b828fdae3c8f2d0f77f6a37f2af63c8b8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 85, + 432, + 526, + 527 + ], + "blocks": [ + { + "bbox": [ + 85, + 346, + 526, + 401 + ], + "lines": [ + { + "bbox": [ + 85, + 346, + 526, + 401 + ], + "spans": [ + { + "bbox": [ + 85, + 346, + 526, + 401 + ], + "type": "text", + "content": "Table 11: Comparison of language model performance across multiple benchmarks. These results were obtained using the OpenCompass toolkit. We compare InternVL3 with Qwen2.5 Chat models, whose corresponding pre-trained base models are employed as the initialization of the language component in InternVL3. Please note that the evaluation scores of the Qwen2.5 series may differ from those officially reported, as we have adopted the prompt versions provided in the table across all datasets for OpenCompass evaluation." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 85, + 432, + 526, + 527 + ], + "lines": [ + { + "bbox": [ + 85, + 432, + 526, + 527 + ], + "spans": [ + { + "bbox": [ + 85, + 432, + 526, + 527 + ], + "type": "table", + "html": "
V2PEδTextVQA valVizWiz valChartQA test avgDocVQA valAI2D testInfoVQA valGQA testSQA-I testPOPETiny LVLMMMMU valSEED v1 imageOverall
X-78.461.781.489.481.169.460.894.487.9348.552.675.675.2
1/25678.061.781.288.581.067.761.094.488.3345.352.975.975.0
1/6478.362.081.789.481.369.660.994.788.3345.752.376.175.3
1/1678.762.181.790.481.670.461.195.088.2345.053.376.175.6
1/479.062.282.491.081.871.761.294.988.1345.852.676.275.9
1/178.761.782.290.281.771.461.294.688.5347.252.476.175.7
", + "image_path": "a882683b95d10eac481fb7abb163436df7ccda6ca735de98d6fb0212917f3480.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 85, + 532, + 526, + 565 + ], + "lines": [ + { + "bbox": [ + 85, + 532, + 526, + 565 + ], + "spans": [ + { + "bbox": [ + 85, + 532, + 526, + 565 + ], + "type": "text", + "content": "Table 12: Performance of the pre-trained InternVL3-8B model on multimodal benchmarks with different positional encoding strategies. When employing V2PE, the impact of different positional increment values " + }, + { + "bbox": [ + 85, + 532, + 526, + 565 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 85, + 532, + 526, + 565 + ], + "type": "text", + "content": " is systematically evaluated." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 85, + 607, + 525, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 607, + 525, + 628 + ], + "spans": [ + { + "bbox": [ + 85, + 607, + 525, + 628 + ], + "type": "text", + "content": "ternVL3 series consistently demonstrates superior performance over the Qwen2.5 chat models across most evaluation benchmarks." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 634, + 526, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 634, + 526, + 721 + ], + "spans": [ + { + "bbox": [ + 85, + 634, + 526, + 721 + ], + "type": "text", + "content": "This observed enhancement in language capabilities primarily arises from several factors, including the integration of approximately " + }, + { + "bbox": [ + 85, + 634, + 526, + 721 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 85, + 634, + 526, + 721 + ], + "type": "text", + "content": " pure-language data, joint parameter optimization during native multimodal pre-training, and the extensive use of high-quality textual corpora during the subsequent post-training stage. Such an approach not only strengthens multimodal comprehension but also significantly enhances language proficiency. Consequently, even when derived from identical pre-trained base models, the integrated multimodal and pure-text training strategy employed by InternVL3 results in substantially improved performance in language capabilities compared to the specialized training pipeline designed for pure-text tasks used by the Qwen2.5 chat models." 
+ } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 86, + 73, + 523, + 285 + ], + "blocks": [ + { + "bbox": [ + 86, + 73, + 523, + 285 + ], + "lines": [ + { + "bbox": [ + 86, + 73, + 523, + 285 + ], + "spans": [ + { + "bbox": [ + 86, + 73, + 523, + 285 + ], + "type": "image", + "image_path": "ec927fbd80730a141c1d6b95f58d4e1e79c5b82d743ca43baae18e32892499e3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 83, + 293, + 526, + 319 + ], + "lines": [ + { + "bbox": [ + 83, + 293, + 526, + 319 + ], + "spans": [ + { + "bbox": [ + 83, + 293, + 526, + 319 + ], + "type": "text", + "content": "Figure 3: Performance comparison on multimodal benchmarks under different training strategies. Native multimodal pre-training endows MLLMs with strong multimodal capabilities, even without further post-training." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 86, + 329, + 525, + 485 + ], + "blocks": [ + { + "bbox": [ + 86, + 329, + 525, + 485 + ], + "lines": [ + { + "bbox": [ + 86, + 329, + 525, + 485 + ], + "spans": [ + { + "bbox": [ + 86, + 329, + 525, + 485 + ], + "type": "table", + "html": "
ModelMPOMMMUMathVistaMathVisionMathVerseDynaMathWeMathLogicVistaOverall
InternVL3-1B43.447.213.818.14.214.731.124.6
43.445.818.818.75.813.429.825.1 (+0.5)
InternVL3-2B49.159.022.023.213.418.130.030.7
48.657.021.725.314.622.436.932.4 (+1.7)
InternVL3-8B61.967.424.736.922.832.743.241.4
62.771.629.339.825.537.144.144.3 (+2.9)
InternVL3-9B59.068.828.932.223.032.546.541.6
57.771.527.635.326.733.849.243.1 (+1.5)
InternVL3-14B67.170.531.238.827.938.149.946.2
67.175.137.244.431.343.051.249.9 (+3.7)
InternVL3-38B69.371.234.245.122.241.754.448.3
70.175.134.248.235.348.658.452.8 (+4.5)
InternVL3-78B72.274.035.244.231.742.553.550.5
72.279.043.151.035.146.155.954.6 (+4.1)
", + "image_path": "161d8384d9e1a45f1bfa6d5a124a923c74d772f4c9ab302fb98daff5df6aad86.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 88, + 490, + 517, + 502 + ], + "lines": [ + { + "bbox": [ + 88, + 490, + 517, + 502 + ], + "spans": [ + { + "bbox": [ + 88, + 490, + 517, + 502 + ], + "type": "text", + "content": "Table 13: Comparison of reasoning abilities before and after Mixed Preference Optimization (MPO)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 83, + 525, + 178, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 525, + 178, + 537 + ], + "spans": [ + { + "bbox": [ + 83, + 525, + 178, + 537 + ], + "type": "text", + "content": "3.14 Ablation Study" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 82, + 546, + 527, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 546, + 527, + 624 + ], + "spans": [ + { + "bbox": [ + 82, + 546, + 527, + 624 + ], + "type": "text", + "content": "The Effectiveness of Native Multimodal Pre-Training. To assess the effectiveness of native multimodal pre-training, we conduct experiments on the InternVL2-8B model while keeping its architecture, initialization parameters, and training data entirely unchanged. Traditionally, InternVL2-8B employs a training pipeline that begins with an MLP warmup phase for multimodal alignment, followed by an instruction-tuning stage. In our experiments, we substitute the conventional MLP warmup phase with our native multimodal pre-training process. This modification isolates the contribution of native multimodal pre-training to the overall multimodal capability of the model." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 82, + 628, + 527, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 628, + 527, + 685 + ], + "spans": [ + { + "bbox": [ + 82, + 628, + 527, + 685 + ], + "type": "text", + "content": "The evaluation results in Figure 3 show that the model with native multimodal pre-training exhibits performance on most benchmarks that is comparable to the fully multi-stage-trained InternVL2-8B baseline. Furthermore, when followed by instruction tuning on higher-quality data, the model demonstrates further performance gains across evaluated multimodal tasks. These findings underscore the efficiency of native multimodal pre-training in imparting powerful multimodal capabilities to MLLMs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 82, + 689, + 527, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 689, + 527, + 723 + ], + "spans": [ + { + "bbox": [ + 82, + 689, + 527, + 723 + ], + "type": "text", + "content": "The Evaluation of Variable Visual Position Encoding. To promote the multimodal capabilities in long-context scenarios, InternVL3 employs Variable Visual Position Encoding (V2PE) in its visual embedding. 
However, in the original V2PE [42], this specialized positional encoding for visual tokens did not yield benefits on" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 82, + 72, + 529, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 72, + 529, + 106 + ], + "spans": [ + { + "bbox": [ + 82, + 72, + 529, + 106 + ], + "type": "text", + "content": "multimodal tasks with moderate context lengths. To further explore the efficacy of V2PE in a broader setting, we incorporated it during the native multimodal pre-training stage and evaluated the InternVL3-8B pre-trained model on standard multimodal benchmarks." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 110, + 526, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 110, + 526, + 177 + ], + "spans": [ + { + "bbox": [ + 82, + 110, + 526, + 177 + ], + "type": "text", + "content": "As reported in Table 12, the introduction of V2PE leads to significant performance gains across most evaluation metrics. In addition, our ablation studies—by varying the positional increment " + }, + { + "bbox": [ + 82, + 110, + 526, + 177 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 82, + 110, + 526, + 177 + ], + "type": "text", + "content": "—reveal that even for tasks primarily involving short contexts, relatively small " + }, + { + "bbox": [ + 82, + 110, + 526, + 177 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 82, + 110, + 526, + 177 + ], + "type": "text", + "content": " values can achieve optimal performance. 
These findings provide important insights for future efforts aimed at refining position encoding strategies for visual tokens in MLLMs. It is important to note that, to ensure fair comparisons, all results elsewhere in this report maintain a fixed " + }, + { + "bbox": [ + 82, + 110, + 526, + 177 + ], + "type": "inline_equation", + "content": "\\delta = 1" + }, + { + "bbox": [ + 82, + 110, + 526, + 177 + ], + "type": "text", + "content": ", except for the experimental results presented in Table 12." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 82, + 181, + 526, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 181, + 526, + 248 + ], + "spans": [ + { + "bbox": [ + 82, + 181, + 526, + 248 + ], + "type": "text", + "content": "Mixed Preference Optimization. Here, we demonstrate the effectiveness of MPO. As shown in Table 13, models fine-tuned with MPO demonstrate superior reasoning performance across seven multimodal reasoning benchmarks compared to their counterparts without MPO. Specifically, InternVL3-78B and InternVL3-38B outperform their counterparts by 4.1 and 4.5 points, respectively. Notably, the training data used for MPO is a subset of that used for SFT, indicating that the performance improvements primarily stem from the training algorithm rather than the training data." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 265, + 162, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 265, + 162, + 277 + ], + "spans": [ + { + "bbox": [ + 83, + 265, + 162, + 277 + ], + "type": "text", + "content": "4 Conclusion" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 290, + 529, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 290, + 529, + 422 + ], + "spans": [ + { + "bbox": [ + 82, + 290, + 529, + 422 + ], + "type": "text", + "content": "We have introduced InternVL3, a significant advancement in the InternVL series that implements a native multimodal pre-training paradigm. By jointly learning linguistic and multimodal capabilities during the pretraining phase, InternVL3 avoids the training complexities and optimization challenges typically associated with post-hoc MLLM training pipelines. Through the incorporation of variable visual position encoding (V2PE) for extended multimodal contexts, advanced post-training strategies—such as supervised fine-tuning and mixed preference optimization—and test-time scaling, InternVL3 establishes a new open-source benchmark across a wide range of multimodal tasks, while simultaneously preserving robust linguistic competencies. Notably, InternVL3-78B attains a 72.2-point score on the MMMU benchmark, exceeding previous open-source MLLMs and reducing the performance gap relative to leading proprietary counterparts (e.g., Gemini-2.5 Pro). In line with our commitment to fostering community-driven innovation in multimodal large language models, we will publicly release InternVL3's training data and model weights, thereby encouraging further research and development in this rapidly evolving field." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 84, + 437, + 142, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 437, + 142, + 449 + ], + "spans": [ + { + "bbox": [ + 84, + 437, + 142, + 449 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 93, + 456, + 526, + 720 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 93, + 456, + 526, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 456, + 526, + 488 + ], + "spans": [ + { + "bbox": [ + 93, + 456, + 526, + 488 + ], + "type": "text", + "content": "[1] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 15" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 93, + 497, + 526, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 497, + 526, + 518 + ], + "spans": [ + { + "bbox": [ + 93, + 497, + 526, + 518 + ], + "type": "text", + "content": "[2] Anonymous. CG-bench: Clue-grounded question answering benchmark for long video understanding. In Submitted to The Thirteenth International Conference on Learning Representations, 2024. under review. 14, 15" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 93, + 528, + 526, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 528, + 526, + 548 + ], + "spans": [ + { + "bbox": [ + 93, + 528, + 526, + 548 + ], + "type": "text", + "content": "[3] Anthropic. The claude 3 model family: Opus, sonnet, haiku. https://www.anthropic.com, 2024. 
2, 8, 9, 10, 11, 12" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 93, + 558, + 526, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 558, + 526, + 587 + ], + "spans": [ + { + "bbox": [ + 93, + 558, + 526, + 587 + ], + "type": "text", + "content": "[4] Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021. 16" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 93, + 599, + 526, + 629 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 599, + 526, + 629 + ], + "spans": [ + { + "bbox": [ + 93, + 599, + 526, + 629 + ], + "type": "text", + "content": "[5] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. 1, 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 93, + 639, + 526, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 639, + 526, + 661 + ], + "spans": [ + { + "bbox": [ + 93, + 639, + 526, + 661 + ], + "type": "text", + "content": "[6] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 11, 12, 13, 14" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 93, + 670, + 526, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 670, + 526, + 692 + ], + "spans": [ + { + "bbox": [ + 93, + 670, + 526, + 692 + ], + "type": "text", + "content": "[7] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2.5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 
1, 2, 9, 10, 15" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 93, + 700, + 526, + 720 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 700, + 526, + 720 + ], + "spans": [ + { + "bbox": [ + 93, + 700, + 526, + 720 + ], + "type": "text", + "content": "[8] Loubna Ben Allal, Anton Lozhkov, Guilherme Penedo, Thomas Wolf, and Leandro von Werra. Smoll-m-corpus, 2024. 5" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 89, + 72, + 527, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 92, + 72, + 526, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 72, + 526, + 105 + ], + "spans": [ + { + "bbox": [ + 92, + 72, + 526, + 105 + ], + "type": "text", + "content": "[9] Ali Furkan Biten, Ruben Tito, Andres Mafla, Lluis Gomez, Marçal Rusinol, Ernest Valveny, CV Jawahar, and Dimosthenis Karatzas. Scene text visual question answering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4291-4301, 2019. 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 89, + 111, + 526, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 111, + 526, + 143 + ], + "spans": [ + { + "bbox": [ + 89, + 111, + 526, + 143 + ], + "type": "text", + "content": "[10] Jie Cao and Jing Xiao. An augmented benchmark dataset for geometric question answering through dual parallel text encoding. In Proceedings of the 29th International Conference on Computational Linguistics, pages 1511-1520, 2022. 
6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 89, + 150, + 526, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 150, + 526, + 172 + ], + "spans": [ + { + "bbox": [ + 89, + 150, + 526, + 172 + ], + "type": "text", + "content": "[11] Shuaichen Chang, David Palzer, Jialin Li, Eric Fosler-Lussier, and Ningchuan Xiao. Mapqa: A dataset for question answering on choropleth maps. arXiv preprint arXiv:2211.08545, 2022. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 90, + 178, + 526, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 178, + 526, + 201 + ], + "spans": [ + { + "bbox": [ + 90, + 178, + 526, + 201 + ], + "type": "text", + "content": "[12] Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal lmm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 13" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 89, + 207, + 527, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 207, + 527, + 239 + ], + "spans": [ + { + "bbox": [ + 89, + 207, + 527, + 239 + ], + "type": "text", + "content": "[13] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. 12" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 89, + 246, + 527, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 246, + 527, + 277 + ], + "spans": [ + { + "bbox": [ + 89, + 246, + 527, + 277 + ], + "type": "text", + "content": "[14] Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374, 2021. 
16" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 89, + 285, + 527, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 285, + 527, + 317 + ], + "spans": [ + { + "bbox": [ + 89, + 285, + 527, + 317 + ], + "type": "text", + "content": "[15] Qiaoling Chen, Diandian Gu, Guoteng Wang, Xun Chen, YingTong Xiong, Ting Huang, Qinghao Hu, Xin Jin, Yonggang Wen, Tianwei Zhang, et al. Internevo: Efficient long-sequence large language model training via hybrid parallelism and redundant sharding. arXiv preprint arXiv:2401.09149, 2024. 2, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 90, + 323, + 526, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 323, + 526, + 346 + ], + "spans": [ + { + "bbox": [ + 90, + 323, + 526, + 346 + ], + "type": "text", + "content": "[16] Qiguang Chen, Libo Qin, Jin Zhang, Zhi Chen, Xiao Xu, and Wanxiang Che. M3cot: A novel benchmark for multi-domain multi-step multi-modal chain-of-thought. arXiv preprint arXiv:2405.16473, 2024. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 89, + 353, + 527, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 353, + 527, + 394 + ], + "spans": [ + { + "bbox": [ + 89, + 353, + 527, + 394 + ], + "type": "text", + "content": "[17] Wenhu Chen, Ming Yin, Max Ku, Pan Lu, Yixin Wan, Xueguang Ma, Jianyu Xu, Xinyi Wang, and Tony Xia. Theoremqa: A theorem-driven question answering dataset. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, EMNLP 2023, Singapore, December 6-10, 2023, pages 7889-7901. Association for Computational Linguistics, 2023. 
16" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 89, + 401, + 527, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 401, + 527, + 433 + ], + "spans": [ + { + "bbox": [ + 89, + 401, + 527, + 433 + ], + "type": "text", + "content": "[18] Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271, 2024. 1, 2, 3, 5, 6, 9, 10, 11, 12, 13, 14, 15" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 89, + 440, + 527, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 440, + 527, + 472 + ], + "spans": [ + { + "bbox": [ + 89, + 440, + 527, + 472 + ], + "type": "text", + "content": "[19] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 1, 3, 10, 11, 12, 13, 14, 15" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 89, + 479, + 527, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 479, + 527, + 510 + ], + "spans": [ + { + "bbox": [ + 89, + 479, + 527, + 510 + ], + "type": "text", + "content": "[20] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 
2, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 89, + 517, + 527, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 517, + 527, + 558 + ], + "spans": [ + { + "bbox": [ + 89, + 517, + 527, + 558 + ], + "type": "text", + "content": "[21] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024. 1, 2, 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 89, + 566, + 527, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 566, + 527, + 588 + ], + "spans": [ + { + "bbox": [ + 89, + 566, + 527, + 588 + ], + "type": "text", + "content": "[22] Kanzhi Cheng, Qiushi Sun, Yougang Chu, Fangzhi Xu, Yantao Li, Jianbing Zhang, and Zhiyong Wu. Seeclick: Harnessing gui grounding for advanced visual gui agents. arXiv preprint arXiv:2401.10935, 2024. 16" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 89, + 594, + 527, + 626 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 594, + 527, + 626 + ], + "spans": [ + { + "bbox": [ + 89, + 594, + 527, + 626 + ], + "type": "text", + "content": "[23] Zesen Cheng, Sicong Leng, Hang Zhang, Yifei Xin, Xin Li, Guanzheng Chen, Yongxin Zhu, Wenqi Zhang, Ziyang Luo, Deli Zhao, et al. Videollama 2: Advancing spatial-temporal modeling and audio understanding in video-llms. arXiv preprint arXiv:2406.07476, 2024. 15" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 89, + 633, + 527, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 633, + 527, + 655 + ], + "spans": [ + { + "bbox": [ + 89, + 633, + 527, + 655 + ], + "type": "text", + "content": "[24] Christopher Clark and Matt Gardner. 
Simple and effective multi-paragraph reading comprehension. In Proceedings of the Annual Meeting of the Association for Computational Linguistics, pages 845–855, 2018. 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 89, + 662, + 527, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 662, + 527, + 693 + ], + "spans": [ + { + "bbox": [ + 89, + 662, + 527, + 693 + ], + "type": "text", + "content": "[25] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. 16" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 89, + 700, + 527, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 700, + 527, + 723 + ], + "spans": [ + { + "bbox": [ + 89, + 700, + 527, + 723 + ], + "type": "text", + "content": "[26] OpenCompass Contributors. Opencompass: A universal evaluation platform for foundation models. https://github.com/open-compass/opencompass, 2023. 9, 10, 11, 12" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 89, + 72, + 529, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 89, + 72, + 529, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 72, + 529, + 95 + ], + "spans": [ + { + "bbox": [ + 89, + 72, + 529, + 95 + ], + "type": "text", + "content": "[27] X.AI Corp. 
Grok-1.5 vision preview: Connecting the digital and physical worlds with our first multimodal model. https://x.ai/blog/grok-1.5v, 2024.11" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 89, + 101, + 528, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 101, + 528, + 133 + ], + "spans": [ + { + "bbox": [ + 89, + 101, + 528, + 133 + ], + "type": "text", + "content": "[28] Wenliang Dai, Nayeon Lee, Boxin Wang, Zhuolin Yang, Zihan Liu, Jon Barker, Tuomas Rintamaki, Moham-mad Shoeybi, Bryan Catanzaro, and Wei Ping. NvIm: Open frontier-class multimodal llms. arXiv preprint arXiv:2409.11402, 2024. 10" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 89, + 140, + 529, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 140, + 529, + 163 + ], + "spans": [ + { + "bbox": [ + 89, + 140, + 529, + 163 + ], + "type": "text", + "content": "[29] Google Deepmind. Gemini 2.0 is now available to everyone. https://blog.google/technology/google-deepmind/gemini-model-updates-february-2025/, 202.9" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 89, + 169, + 527, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 169, + 527, + 191 + ], + "spans": [ + { + "bbox": [ + 89, + 169, + 527, + 191 + ], + "type": "text", + "content": "[30] Google Deepmind. Introducing gemini 2.0: our new ai model for the agentic era. https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/, 2024.9" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 89, + 198, + 527, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 198, + 527, + 229 + ], + "spans": [ + { + "bbox": [ + 89, + 198, + 527, + 229 + ], + "type": "text", + "content": "[31] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, et al. 
Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024. 1, 10" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 89, + 236, + 527, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 236, + 527, + 269 + ], + "spans": [ + { + "bbox": [ + 89, + 236, + 527, + 269 + ], + "type": "text", + "content": "[32] Xiaoyi Dong, Pan Zhang, Yuhang Zang, Yuhang Cao, Bin Wang, Linke Ouyang, Songyang Zhang, Haodong Duan, Wenwei Zhang, Yining Li, et al. Internlm-xcomposer2-4khd: A pioneering large vision-language model handling resolutions from 336 pixels to 4k hd. arXiv preprint arXiv:2404.06512, 2024. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 89, + 274, + 527, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 274, + 527, + 308 + ], + "spans": [ + { + "bbox": [ + 89, + 274, + 527, + 308 + ], + "type": "text", + "content": "[33] Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. Vlmevalkit: An open-source toolkit for evaluating large multi-modality models. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 11198-11201, 2024. 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 89, + 313, + 527, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 313, + 527, + 346 + ], + "spans": [ + { + "bbox": [ + 89, + 313, + 527, + 346 + ], + "type": "text", + "content": "[34] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. 
10" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 89, + 352, + 527, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 352, + 527, + 375 + ], + "spans": [ + { + "bbox": [ + 89, + 352, + 527, + 375 + ], + "type": "text", + "content": "[35] Xinyu Fang, Kangrui Mao, Haodong Duan, Xiangyu Zhao, Yining Li, Dahua Lin, and Kai Chen. Mmbench-video: A long-form multi-shot benchmark for holistic video understanding. arXiv preprint arXiv:2406.14515, 2024. 14, 15" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 89, + 381, + 527, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 381, + 527, + 413 + ], + "spans": [ + { + "bbox": [ + 89, + 381, + 527, + 413 + ], + "type": "text", + "content": "[36] Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In Conference on Computer Vision and Pattern Recognition Workshop, pages 178-178, 2004. 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 89, + 419, + 527, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 419, + 527, + 452 + ], + "spans": [ + { + "bbox": [ + 89, + 419, + 527, + 452 + ], + "type": "text", + "content": "[37] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Zhenyu Qiu, Wei Lin, Jinrui Yang, Xiawu Zheng, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 12" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 89, + 458, + 527, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 458, + 527, + 491 + ], + "spans": [ + { + "bbox": [ + 89, + 458, + 527, + 491 + ], + "type": "text", + "content": "[38] Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. 
Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 14, 15" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 89, + 497, + 527, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 497, + 527, + 529 + ], + "spans": [ + { + "bbox": [ + 89, + 497, + 527, + 529 + ], + "type": "text", + "content": "[39] Xingyu Fu, Yushi Hu, Bangzheng Li, Yu Feng, Haoyu Wang, Xudong Lin, Dan Roth, Noah A Smith, Wei-Chiu Ma, and Ranjay Krishna. Blink: Multimodal large language models can see but not perceive. arXiv preprint arXiv:2404.12390, 2024. 9, 11" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 89, + 536, + 527, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 536, + 527, + 567 + ], + "spans": [ + { + "bbox": [ + 89, + 536, + 527, + 567 + ], + "type": "text", + "content": "[40] Jiahui Gao, Renjie Pi, Jipeng Zhang, Jiacheng Ye, Wanjun Zhong, Yufei Wang, Lanqing Hong, Jianhua Han, Hang Xu, Zhenguo Li, et al. G-llava: Solving geometric problem with multi-modal large language model. arXiv preprint arXiv:2312.11370, 2023. 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 89, + 574, + 527, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 574, + 527, + 607 + ], + "spans": [ + { + "bbox": [ + 89, + 574, + 527, + 607 + ], + "type": "text", + "content": "[41] Zhangwei Gao, Zhe Chen, Erfei Cui, Yiming Ren, Weiyun Wang, Jinguo Zhu, Hao Tian, Shenglong Ye, Junjun He, Xizhou Zhu, et al. 
Mini-internvl: A flexible-transfer pocket multimodal model with " + }, + { + "bbox": [ + 89, + 574, + 527, + 607 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 89, + 574, + 527, + 607 + ], + "type": "text", + "content": " parameters and " + }, + { + "bbox": [ + 89, + 574, + 527, + 607 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 89, + 574, + 527, + 607 + ], + "type": "text", + "content": " performance. arXiv preprint arXiv:2410.16261, 2024. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 89, + 613, + 527, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 613, + 527, + 645 + ], + "spans": [ + { + "bbox": [ + 89, + 613, + 527, + 645 + ], + "type": "text", + "content": "[42] Junqi Ge, Ziyi Chen, Jintao Lin, Jinguo Zhu, Xihui Liu, Jifeng Dai, and Xizhou Zhu. V2pe: Improving multi-modal long-context capability of vision-language models with variable visual position encoding. arXiv preprint arXiv:2412.09616, 2024. 2, 3, 18" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 89, + 651, + 527, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 651, + 527, + 684 + ], + "spans": [ + { + "bbox": [ + 89, + 651, + 527, + 684 + ], + "type": "text", + "content": "[43] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6904-6913, 2017. 
6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 89, + 691, + 527, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 691, + 527, + 723 + ], + "spans": [ + { + "bbox": [ + 89, + 691, + 527, + 723 + ], + "type": "text", + "content": "[44] Shuhao Gu, Jialing Zhang, Siyuan Zhou, Kevin Yu, Zhaohu Xing, Liangdong Wang, Zhou Cao, Jintao Jia, Zhuoyi Zhang, Yixuan Wang, et al. Infinity-mm: Scaling multimodal performance with large-scale and high-quality instruction data. arXiv preprint arXiv:2410.18558, 2024. 9, 10" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 89, + 72, + 527, + 722 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 89, + 72, + 526, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 72, + 526, + 105 + ], + "spans": [ + { + "bbox": [ + 89, + 72, + 526, + 105 + ], + "type": "text", + "content": "[45] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: An advanced diagnostic suite for entangled language hallucination & visual illusion in large vision-language models. arXiv preprint arXiv:2310.14566, 2023. 
8, 12, 13" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 89, + 110, + 527, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 110, + 527, + 140 + ], + "spans": [ + { + "bbox": [ + 89, + 110, + 527, + 140 + ], + "type": "text", + "content": "[46] Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. Measuring massive multitask language understanding. In The International Conference on Learning Representations, 2020. 16" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 89, + 148, + 527, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 148, + 527, + 189 + ], + "spans": [ + { + "bbox": [ + 89, + 148, + 527, + 189 + ], + "type": "text", + "content": "[47] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. In Joaquin Vanschoeren and Sai-Kit Yeung, editors, Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual, 2021. 16" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 89, + 196, + 527, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 196, + 527, + 227 + ], + "spans": [ + { + "bbox": [ + 89, + 196, + 527, + 227 + ], + "type": "text", + "content": "[48] Yuzhen Huang, Yuzhuo Bai, Zhihao Zhu, Junlei Zhang, Jinghan Zhang, Tangjun Su, Junteng Liu, Chuancheng Lv, Yikai Zhang, Yao Fu, et al. C-eval: A multi-level multi-discipline chinese evaluation suite for foundation models. Advances in Neural Information Processing Systems, 36, 2024. 
16" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 89, + 233, + 527, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 233, + 527, + 264 + ], + "spans": [ + { + "bbox": [ + 89, + 233, + 527, + 264 + ], + "type": "text", + "content": "[49] Zheng Huang, Kai Chen, Jianhua He, Xiang Bai, Dimosthenis Karatzas, Shijian Lu, and CV Jawahar. Icdar2019 competition on scanned receiptOCR and information extraction. In 2019 International Conference on Document Analysis and Recognition (ICDAR), pages 1516-1520. IEEE, 2019. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 89, + 270, + 527, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 270, + 527, + 301 + ], + "spans": [ + { + "bbox": [ + 89, + 270, + 527, + 301 + ], + "type": "text", + "content": "[50] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6700–6709, 2019. 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 89, + 308, + 527, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 308, + 527, + 331 + ], + "spans": [ + { + "bbox": [ + 89, + 308, + 527, + 331 + ], + "type": "text", + "content": "[51] Dongfu Jiang, Xuan He, Huaye Zeng, Cong Wei, Max Ku, Qian Liu, and Wenhu Chen. Mantis: Interleaved multi-image instruction tuning. arXiv preprint arXiv:2405.01483, 2024. 9, 11" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 89, + 336, + 527, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 336, + 527, + 358 + ], + "spans": [ + { + "bbox": [ + 89, + 336, + 527, + 358 + ], + "type": "text", + "content": "[52] Mandar Joshi, Eunsol Choi, Daniel S Weld, and Luke Zettlemoyer. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. arXiv preprint arXiv:1705.03551, 2017. 
16" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 89, + 364, + 527, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 364, + 527, + 385 + ], + "spans": [ + { + "bbox": [ + 89, + 364, + 527, + 385 + ], + "type": "text", + "content": "[53] Seungjae Jung, Gunsoo Han, Daniel Wontae Nam, and Kyoung-Woon On. Binary classifier optimization for large language model alignment. arXiv preprint arXiv:2404.04656, 2024. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 89, + 392, + 527, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 392, + 527, + 422 + ], + "spans": [ + { + "bbox": [ + 89, + 392, + 527, + 422 + ], + "type": "text", + "content": "[54] Kushal Kafle, Brian Price, Scott Cohen, and Christopher Kanan. Dvqa: Understanding data visualizations via question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5648-5656, 2018. 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 89, + 429, + 527, + 451 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 429, + 527, + 451 + ], + "spans": [ + { + "bbox": [ + 89, + 429, + 527, + 451 + ], + "type": "text", + "content": "[55] Mehran Kazemi, Hamidreza Alvari, Ankit Anand, Jialin Wu, Xi Chen, and Radu Soricut. Geomverse: A systematic evaluation of large models for geometric reasoning. arXiv preprint arXiv:2312.12241, 2023. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 89, + 457, + 527, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 457, + 527, + 488 + ], + "spans": [ + { + "bbox": [ + 89, + 457, + 527, + 488 + ], + "type": "text", + "content": "[56] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing, pages 787-798, 2014. 
13" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 89, + 495, + 527, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 495, + 527, + 517 + ], + "spans": [ + { + "bbox": [ + 89, + 495, + 527, + 517 + ], + "type": "text", + "content": "[57] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In European Conference on Computer Vision, pages 235-251, 2016. 6, 7, 8, 10" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 89, + 522, + 527, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 522, + 527, + 553 + ], + "spans": [ + { + "bbox": [ + 89, + 522, + 527, + 553 + ], + "type": "text", + "content": "[58] Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. Natural questions: a benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453-466, 2019. 16" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 89, + 560, + 527, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 560, + 527, + 582 + ], + "spans": [ + { + "bbox": [ + 89, + 560, + 527, + 582 + ], + "type": "text", + "content": "[59] Guokun Lai, Qizhe Xie, Hanxiao Liu, Yiming Yang, and Eduard Hovy. Race: Large-scale reading comprehension dataset from examinations. arXiv preprint arXiv:1704.04683, 2017. 16" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 89, + 588, + 527, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 588, + 527, + 618 + ], + "spans": [ + { + "bbox": [ + 89, + 588, + 527, + 618 + ], + "type": "text", + "content": "[60] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. 
arXiv preprint arXiv:2408.03326, 2024. 9, 10, 11, 12, 15, 16" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 89, + 625, + 527, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 625, + 527, + 655 + ], + "spans": [ + { + "bbox": [ + 89, + 625, + 527, + 655 + ], + "type": "text", + "content": "[61] Bohao Li, Yuying Ge, Yi Chen, Yixiao Ge, Ruimao Zhang, and Ying Shan. Seed-bench-2-plus: Benchmarking multimodal large language models with text-rich visual comprehension. arXiv preprint arXiv:2404.16790, 2024.8, 10" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 89, + 663, + 527, + 694 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 663, + 527, + 694 + ], + "spans": [ + { + "bbox": [ + 89, + 663, + 527, + 694 + ], + "type": "text", + "content": "[62] Chunyi Li, Jianbo Zhang, Zicheng Zhang, Haoning Wu, Yuan Tian, Wei Sun, Guo Lu, Xiaohong Liu, Xiongkuo Min, Weisi Lin, et al. R-bench: Are your large multimodal model robust to real-world corruptions? arXiv preprint arXiv:2410.05474, 2024. 11" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 89, + 700, + 527, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 700, + 527, + 722 + ], + "spans": [ + { + "bbox": [ + 89, + 700, + 527, + 722 + ], + "type": "text", + "content": "[63] Haonan Li, Yixuan Zhang, Fajri Koto, Yifei Yang, Hai Zhao, Yeyun Gong, Nan Duan, and Timothy Baldwin. Cmflu: Measuring massive multitask language understanding in chinese. arXiv preprint arXiv:2306.09212, 2023. 
16" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 89, + 72, + 529, + 723 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 89, + 72, + 529, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 72, + 529, + 95 + ], + "spans": [ + { + "bbox": [ + 89, + 72, + 529, + 95 + ], + "type": "text", + "content": "[64] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023. 15" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 89, + 99, + 528, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 99, + 528, + 132 + ], + "spans": [ + { + "bbox": [ + 89, + 99, + 528, + 132 + ], + "type": "text", + "content": "[65] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024. 14, 15" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 89, + 137, + 529, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 137, + 529, + 169 + ], + "spans": [ + { + "bbox": [ + 89, + 137, + 529, + 169 + ], + "type": "text", + "content": "[66] Yanghao Li, Chao-Yuan Wu, Haoqi Fan, Karttikeya Mangalam, Bo Xiong, Jitendra Malik, and Christoph Feichtenhofer. 
Mviv2: Improved multiscale vision transformers for classification and detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4804-4814, 2022. 1, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 89, + 173, + 527, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 173, + 527, + 206 + ], + "spans": [ + { + "bbox": [ + 89, + 173, + 527, + 206 + ], + "type": "text", + "content": "[67] Yifan Li, Yifan Du, Kun Zhou, Jinping Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. In The Conference on Empirical Methods in Natural Language Processing, pages 292–305, 2023. 12, 13" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 89, + 211, + 527, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 211, + 527, + 243 + ], + "spans": [ + { + "bbox": [ + 89, + 211, + 527, + 243 + ], + "type": "text", + "content": "[68] Zhang Li, Biao Yang, Qiang Liu, Zhiyin Ma, Shuo Zhang, Jingxu Yang, Yabo Sun, Yuliang Liu, and Xiang Bai. Monkey: Image resolution and text label are important things for large multi-modal models. arXiv preprint arXiv:2311.06607, 2023. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 89, + 247, + 527, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 247, + 527, + 281 + ], + "spans": [ + { + "bbox": [ + 89, + 247, + 527, + 281 + ], + "type": "text", + "content": "[69] Zhiqi Li, Guo Chen, Shilong Liu, Shihao Wang, Vibashan VS, Yishen Ji, Shiyi Lan, Hao Zhang, Yilin Zhao, Subhashree Radhakrishnan, et al. Eagle 2: Building post-training data strategies from scratch for frontier vision-language models. arXiv preprint arXiv:2501.14818, 2025. 
1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 89, + 285, + 527, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 285, + 527, + 318 + ], + "spans": [ + { + "bbox": [ + 89, + 285, + 527, + 318 + ], + "type": "text", + "content": "[70] Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, 2023. 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 89, + 322, + 527, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 322, + 527, + 355 + ], + "spans": [ + { + "bbox": [ + 89, + 322, + 527, + 355 + ], + "type": "text", + "content": "[71] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pre-training for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26689-26699, 2024. 1, 9, 10, 15, 16" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 89, + 358, + 527, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 358, + 527, + 382 + ], + "spans": [ + { + "bbox": [ + 89, + 358, + 527, + 382 + ], + "type": "text", + "content": "[72] Adam Dahlgren Lindström and Savitha Sam Abraham. Clevr-math: A dataset for compositional language, visual and mathematical reasoning. arXiv preprint arXiv:2208.05358, 2022. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 89, + 386, + 527, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 386, + 527, + 409 + ], + "spans": [ + { + "bbox": [ + 89, + 386, + 527, + 409 + ], + "type": "text", + "content": "[73] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in Neural Information Processing Systems, 36, 2023. 
2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 89, + 413, + 527, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 413, + 527, + 446 + ], + "spans": [ + { + "bbox": [ + 89, + 413, + 527, + 446 + ], + "type": "text", + "content": "[74] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Qing Jiang, Chunyuan Li, Jianwei Yang, Hang Su, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. In European Conference on Computer Vision, pages 38-55. Springer, 2025. 13" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 89, + 450, + 527, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 450, + 527, + 482 + ], + "spans": [ + { + "bbox": [ + 89, + 450, + 527, + 482 + ], + "type": "text", + "content": "[75] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? arXiv preprint arXiv:2307.06281, 2023. 12" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 89, + 487, + 527, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 487, + 527, + 520 + ], + "spans": [ + { + "bbox": [ + 89, + 487, + 527, + 520 + ], + "type": "text", + "content": "[76] Yuliang Liu, Zhang Li, Hongliang Li, Wenwen Yu, Mingxin Huang, Dezhi Peng, Mingyu Liu, Mingrui Chen, Chunyuan Li, Lianwen Jin, et al. On the hidden mystery ofOCR in large multimodal models. arXiv preprint arXiv:2305.07895, 2023. 8, 10" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 89, + 525, + 527, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 525, + 527, + 548 + ], + "spans": [ + { + "bbox": [ + 89, + 525, + 527, + 548 + ], + "type": "text", + "content": "[77] Zihan Liu, Yang Chen, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. 
Acemath: Advancing frontier math reasoning with post-training and reward modeling. arXiv preprint, 2024. 5" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 89, + 552, + 527, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 552, + 527, + 574 + ], + "spans": [ + { + "bbox": [ + 89, + 552, + 527, + 574 + ], + "type": "text", + "content": "[78] Zuyan Liu, Yuhao Dong, Ziwei Liu, Winston Hu, Jiwen Lu, and Yongming Rao. Oryx mllm: On-demand spatial-temporal understanding at arbitrary resolution. arXiv preprint arXiv:2409.12961, 2024. 15" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 89, + 578, + 527, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 578, + 527, + 611 + ], + "spans": [ + { + "bbox": [ + 89, + 578, + 527, + 611 + ], + "type": "text", + "content": "[79] Dakuan Lu, Xiaoyu Tan, Rui Xu, Tianchu Yao, Chao Qu, Wei Chu, Yinghui Xu, and Yuan Qi. Scp-116k: A high-quality problem-solution dataset and a generalized pipeline for automated extraction in the higher education science domain, 2025. 5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 89, + 616, + 527, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 616, + 527, + 649 + ], + "spans": [ + { + "bbox": [ + 89, + 616, + 527, + 649 + ], + "type": "text", + "content": "[80] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. 
7, 8, 9" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 89, + 653, + 527, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 653, + 527, + 685 + ], + "spans": [ + { + "bbox": [ + 89, + 653, + 527, + 685 + ], + "type": "text", + "content": "[81] Pan Lu, Ran Gong, Shibiao Jiang, Liang Qiu, Siyuan Huang, Xiaodan Liang, and Song-Chun Zhu. Inter-gps: Interpretable geometry problem solving with formal language and symbolic reasoning. arXiv preprint arXiv:2105.04165, 2021. 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 89, + 690, + 527, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 690, + 527, + 723 + ], + "spans": [ + { + "bbox": [ + 89, + 690, + 527, + 723 + ], + "type": "text", + "content": "[82] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. Advances in Neural Information Processing Systems, 35:2507-2521, 2022. 6" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 85, + 72, + 527, + 721 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 89, + 72, + 526, + 103 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 72, + 526, + 103 + ], + "spans": [ + { + "bbox": [ + 89, + 72, + 526, + 103 + ], + "type": "text", + "content": "[83] Pan Lu, Liang Qiu, Jiaqi Chen, Tony Xia, Yizhou Zhao, Wei Zhang, Zhou Yu, Xiaodan Liang, and Song-Chun Zhu. 
Iconqa: A new benchmark for abstract diagram understanding and visual language reasoning. arXiv preprint arXiv:2110.13214, 2021.6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 89, + 110, + 526, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 110, + 526, + 132 + ], + "spans": [ + { + "bbox": [ + 89, + 110, + 526, + 132 + ], + "type": "text", + "content": "[84] Shiyin Lu, Yang Li, Qing-Guo Chen, Zhao Xu, Weihua Luo, Kaifu Zhang, and Han-Jia Ye. Ovis: Structural embedding alignment for multimodal large language model. arXiv preprint arXiv:2405.20797, 2024. 9, 10" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 89, + 137, + 527, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 137, + 527, + 169 + ], + "spans": [ + { + "bbox": [ + 89, + 137, + 527, + 169 + ], + "type": "text", + "content": "[85] Xudong Lu, Yinghao Chen, Cheng Chen, Hui Tan, Boheng Chen, Yina Xie, Rui Hu, Guanxin Tan, Renshou Wu, Yan Hu, et al. Bluelm-v-3b: Algorithm and system co-design for multimodal large language models on mobile devices. arXiv preprint arXiv:2411.10640, 2024. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 90, + 176, + 526, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 176, + 526, + 198 + ], + "spans": [ + { + "bbox": [ + 90, + 176, + 526, + 198 + ], + "type": "text", + "content": "[86] Yujie Lu, Dongfu Jiang, Wenhu Chen, William Yang Wang, Yejin Choi, and Bill Yuchen Lin. Wildvision: Evaluating vision-language models in the wild with human preferences. arXiv preprint arXiv:2406.11069, 2024. 
11" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 89, + 204, + 527, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 204, + 527, + 235 + ], + "spans": [ + { + "bbox": [ + 89, + 204, + 527, + 235 + ], + "type": "text", + "content": "[87] Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592, 2, 2024. 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 89, + 241, + 526, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 241, + 526, + 272 + ], + "spans": [ + { + "bbox": [ + 89, + 241, + 526, + 272 + ], + "type": "text", + "content": "[88] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11–20, 2016. 13" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 89, + 279, + 526, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 279, + 526, + 309 + ], + "spans": [ + { + "bbox": [ + 89, + 279, + 526, + 309 + ], + "type": "text", + "content": "[89] Andrés Marafioti, Orr Zohar, Miquel Farré, Merve Noyan, Elie Bakouch, Pedro Cuenca, Cyril Zakka, Loubna Ben Allal, Anton Lozhkov, Nouamane Tazi, et al. Smolvlm: Redefining small and efficient multimodal models. arXiv preprint arXiv:2504.05299, 2025. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 90, + 317, + 526, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 317, + 526, + 347 + ], + "spans": [ + { + "bbox": [ + 90, + 317, + 526, + 347 + ], + "type": "text", + "content": "[90] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. 
Ok-vqa: A visual question answering benchmark requiring external knowledge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3195-3204, 2019. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 89, + 354, + 526, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 354, + 526, + 385 + ], + "spans": [ + { + "bbox": [ + 89, + 354, + 526, + 385 + ], + "type": "text", + "content": "[91] Ahmed Masry, Xuan Long Do, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. In Proceedings of the Annual Meeting of the Association for Computational Linguistics, pages 2263-2279, 2022. 6, 7, 8, 10" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 90, + 392, + 527, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 392, + 527, + 422 + ], + "spans": [ + { + "bbox": [ + 90, + 392, + 527, + 422 + ], + "type": "text", + "content": "[92] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and CV Jawahar. Infographicvqa. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1697-1706, 2022. 6, 7, 8, 10" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 89, + 430, + 527, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 430, + 527, + 460 + ], + "spans": [ + { + "bbox": [ + 89, + 430, + 527, + 460 + ], + "type": "text", + "content": "[93] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 2200–2209, 2021. 
7, 8, 10" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 90, + 468, + 527, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 468, + 527, + 488 + ], + "spans": [ + { + "bbox": [ + 90, + 468, + 527, + 488 + ], + "type": "text", + "content": "[94] Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. arXiv preprint arXiv:2407.00215, 2024. 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 89, + 495, + 526, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 495, + 526, + 525 + ], + "spans": [ + { + "bbox": [ + 89, + 495, + 526, + 525 + ], + "type": "text", + "content": "[95] Fanqing Meng, Jin Wang, Chuanhao Li, Quanfeng Lu, Hao Tian, Jiaqi Liao, Xizhou Zhu, Jifeng Dai, Yu Qiao, Ping Luo, et al. Mmiu: Multimodal multi-image understanding for evaluating large vision-language models. arXiv preprint arXiv:2408.02718, 2024. 9, 11" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 89, + 533, + 527, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 533, + 527, + 562 + ], + "spans": [ + { + "bbox": [ + 89, + 533, + 527, + 562 + ], + "type": "text", + "content": "[96] Anand Mishra, Shashank Shekhar, Ajeet Kumar Singh, and Anirban Chakraborty. Ocr-vqa: Visual question answering by reading text in images. In International Conference on Document Analysis and Recognition, pages 947-952, 2019. 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 89, + 571, + 527, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 571, + 527, + 590 + ], + "spans": [ + { + "bbox": [ + 89, + 571, + 527, + 590 + ], + "type": "text", + "content": "[97] OpenAI. Gpt-4v(ison) system card. 
https://cdn.openai.com/papers/GPTV_System/Card.pdf, 2023.1,8,9,10,11,12,14,15,16" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 90, + 598, + 501, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 598, + 501, + 609 + ], + "spans": [ + { + "bbox": [ + 90, + 598, + 501, + 609 + ], + "type": "text", + "content": "[98] OpenAI. Gpt-4o system card. https://openai.com/index/gpt-4o-system-card/, 2025.2,8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 90, + 616, + 527, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 616, + 527, + 647 + ], + "spans": [ + { + "bbox": [ + 90, + 616, + 527, + 647 + ], + "type": "text", + "content": "[99] Runqi Qiao, Qiuna Tan, Guanting Dong, Minhui Wu, Chong Sun, Xiaoshuai Song, Zhuoma GongQue, Shanglin Lei, Zhe Wei, Miaoxuan Zhang, et al. We-math: Does your large multimodal model achieve human-like mathematical reasoning? arXiv preprint arXiv:2407.01284, 2024. 8, 9" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 85, + 654, + 527, + 683 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 654, + 527, + 683 + ], + "spans": [ + { + "bbox": [ + 85, + 654, + 527, + 683 + ], + "type": "text", + "content": "[100] Yujia Qin, Yining Ye, Junjie Fang, Haoming Wang, Shihao Liang, Shizuo Tian, Junda Zhang, Jiahao Li, Yunxin Li, Shijue Huang, et al. Ui-tars: Pioneering automated gui interaction with native agents. arXiv preprint arXiv:2501.12326, 2025. 16" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 85, + 691, + 527, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 691, + 527, + 721 + ], + "spans": [ + { + "bbox": [ + 85, + 691, + 527, + 721 + ], + "type": "text", + "content": "[101] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. 
Advances in Neural Information Processing Systems, 36, 2024. 6" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 83, + 72, + 527, + 722 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 85, + 72, + 526, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 72, + 526, + 105 + ], + "spans": [ + { + "bbox": [ + 85, + 72, + 526, + 105 + ], + "type": "text", + "content": "[102] Machel Reid, Nikolay Savinov, Denis Teplyashin, Dmitry Lepikhin, Timothy Lillicrap, Jean-baptiste Alayrac, Radu Soricut, Angeliki Lazaridou, Orhan Firat, Julian Schrittwieser, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024. 10, 11, 12, 15, 16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 110, + 526, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 110, + 526, + 140 + ], + "spans": [ + { + "bbox": [ + 83, + 110, + 526, + 140 + ], + "type": "text", + "content": "[103] Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. Winogrande: An adversarial winograd schema challenge at scale. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 8732-8740, 2020. 
16" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 83, + 147, + 527, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 147, + 527, + 178 + ], + "spans": [ + { + "bbox": [ + 83, + 147, + 527, + 178 + ], + "type": "text", + "content": "[104] Minjoon Seo, Hannaneh Hajishirzi, Ali Farhadi, Oren Etzioni, and Clint Malcolm. Solving geometry problems: Combining text and diagram interpretation. In Proceedings of the 2015 conference on empirical methods in natural language processing, pages 1466-1476, 2015. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 84, + 184, + 526, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 184, + 526, + 217 + ], + "spans": [ + { + "bbox": [ + 84, + 184, + 526, + 217 + ], + "type": "text", + "content": "[105] Min Shi, Fuxiao Liu, Shihao Wang, Shijia Liao, Subhashree Radhakrishnan, De-An Huang, Hongxu Yin, Karan Sapra, Yaser Yacoob, Humphrey Shi, et al. Eagle: Exploring the design space for multimodal llms with mixture of encoders. arXiv preprint arXiv:2408.15998, 2024. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 84, + 222, + 527, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 222, + 527, + 253 + ], + "spans": [ + { + "bbox": [ + 84, + 222, + 527, + 253 + ], + "type": "text", + "content": "[106] Wenhao Shi, Zhiqiang Hu, Yi Bin, Junhua Liu, Yang Yang, See-Kiong Ng, Lidong Bing, and Roy Ka-Wei Lee. Math-llava: Bootstrapping mathematical reasoning for multimodal large language models. arXiv preprint arXiv:2406.17294, 2024. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 84, + 259, + 527, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 259, + 527, + 291 + ], + "spans": [ + { + "bbox": [ + 84, + 259, + 527, + 291 + ], + "type": "text", + "content": "[107] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. 
Towards vqa models that can read. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8317-8326, 2019. 6, 8, 10" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 84, + 297, + 526, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 297, + 526, + 318 + ], + "spans": [ + { + "bbox": [ + 84, + 297, + 526, + 318 + ], + "type": "text", + "content": "[108] Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024. 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 84, + 323, + 526, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 323, + 526, + 346 + ], + "spans": [ + { + "bbox": [ + 84, + 323, + 526, + 346 + ], + "type": "text", + "content": "[109] Hai-Long Sun, Da-Wei Zhou, Yang Li, Shiyin Lu, Chao Yi, Qing-Guo Chen, Zhao Xu, Weihua Luo, Kaifu Zhang, De-Chuan Zhan, et al. Parrot: Multilingual visual instruction tuning. arXiv preprint arXiv:2406.02539, 2024. 14" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 84, + 351, + 526, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 351, + 526, + 373 + ], + "spans": [ + { + "bbox": [ + 84, + 351, + 526, + 373 + ], + "type": "text", + "content": "[110] Kai Sun, Dian Yu, Dong Yu, and Claire Cardie. Investigating prior knowledge for challenging Chinese machine reading comprehension. Transactions of the Association for Computational Linguistics, 8:141-155, 2020. 
16" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 84, + 378, + 527, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 378, + 527, + 409 + ], + "spans": [ + { + "bbox": [ + 84, + 378, + 527, + 409 + ], + "type": "text", + "content": "[111] Zhiqing Sun, Sheng Shen, Shengcao Cao, Haotian Liu, Chunyuan Li, Yikang Shen, Chuang Gan, Liang-Yan Gui, Yu-Xiong Wang, Yiming Yang, et al. Aligning large multimodal models with factually augmented rlhf. arXiv preprint arXiv:2309.14525, 2023. 12, 13" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 84, + 415, + 526, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 415, + 526, + 446 + ], + "spans": [ + { + "bbox": [ + 84, + 415, + 526, + 446 + ], + "type": "text", + "content": "[112] Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261, 2022. 16" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 84, + 453, + 527, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 453, + 527, + 483 + ], + "spans": [ + { + "bbox": [ + 84, + 453, + 527, + 483 + ], + "type": "text", + "content": "[113] Jingqun Tang, Qi Liu, Yongjie Ye, Jinghui Lu, Shu Wei, Chunhui Lin, Wanqing Li, Mohamad Fitri Faiz Bin Mahmood, Hao Feng, Zhen Zhao, et al. Mtvqa: Benchmarking multilingual text-centric visual question answering. arXiv preprint arXiv:2405.11985, 2024. 
14" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 84, + 490, + 527, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 490, + 527, + 521 + ], + "spans": [ + { + "bbox": [ + 84, + 490, + 527, + 521 + ], + "type": "text", + "content": "[114] Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1, 14" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 84, + 527, + 359, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 527, + 359, + 539 + ], + "spans": [ + { + "bbox": [ + 84, + 527, + 359, + 539 + ], + "type": "text", + "content": "[115] Qwen Team. Qvq: To see the world with wisdom, December 2024. 9" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 84, + 544, + 527, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 544, + 527, + 576 + ], + "spans": [ + { + "bbox": [ + 84, + 544, + 527, + 576 + ], + "type": "text", + "content": "[116] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024. 9, 10, 11, 12" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 84, + 582, + 526, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 582, + 526, + 594 + ], + "spans": [ + { + "bbox": [ + 84, + 582, + 526, + 594 + ], + "type": "text", + "content": "[117] v DeepMind. Gemini 2.5 pro. https://deepmind.google/technologies/gemini/pro/, 2025. 
1, 2, 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 84, + 599, + 526, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 599, + 526, + 631 + ], + "spans": [ + { + "bbox": [ + 84, + 599, + 526, + 631 + ], + "type": "text", + "content": "[118] Fei Wang, Xingyu Fu, James Y Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou, Kai Zhang, et al. Muirbench: A comprehensive benchmark for robust multi-image understanding. arXiv preprint arXiv:2406.09411, 2024. 9, 11" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 84, + 636, + 526, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 636, + 526, + 658 + ], + "spans": [ + { + "bbox": [ + 84, + 636, + 526, + 658 + ], + "type": "text", + "content": "[119] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. arXiv preprint arXiv:2402.14804, 2024. 8, 9" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 84, + 664, + 526, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 664, + 526, + 685 + ], + "spans": [ + { + "bbox": [ + 84, + 664, + 526, + 685 + ], + "type": "text", + "content": "[120] Peiyi Wang, Lei Li, Zhihong Shao, RX Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce lms step-by-step without human annotations. arXiv preprint arXiv:2312.08935, 2023. 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 84, + 691, + 526, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 691, + 526, + 722 + ], + "spans": [ + { + "bbox": [ + 84, + 691, + 526, + 722 + ], + "type": "text", + "content": "[121] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. 
arXiv preprint arXiv:2409.12191, 2024. 1, 8, 10, 11, 12, 13, 14, 15" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 83, + 72, + 527, + 723 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 85, + 72, + 527, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 72, + 527, + 95 + ], + "spans": [ + { + "bbox": [ + 85, + 72, + 527, + 95 + ], + "type": "text", + "content": "[122] Peng Wang, Shijie Wang, Junyang Lin, Shuai Bai, Xiaohuan Zhou, Jingren Zhou, Xinggang Wang, and Chang Zhou. One-peace: Exploring one general representation model toward unlimited modalities. arXiv:2305.11172, 2023. 13" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 101, + 526, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 101, + 526, + 124 + ], + "spans": [ + { + "bbox": [ + 83, + 101, + 526, + 124 + ], + "type": "text", + "content": "[123] Weihan Wang, Qingsong Lv, Wenmeng Yu, Wenyi Hong, Ji Qi, Yan Wang, Junhui Ji, Zhuoyi Yang, Lei Zhao, Xixuan Song, et al. Cogvlm: Visual expert for pretrained language models. arXiv preprint arXiv:2311.03079, 2023. 1, 13" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 84, + 130, + 527, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 130, + 527, + 162 + ], + "spans": [ + { + "bbox": [ + 84, + 130, + 527, + 162 + ], + "type": "text", + "content": "[124] Weiyun Wang, Zhe Chen, Wenhai Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Jinguo Zhu, Xizhou Zhu, Lewei Lu, Yu Qiao, and Jifeng Dai. 
Enhancing the reasoning ability of multimodal large language models via mixed preference optimization. arXiv preprint arXiv:2411.10442, 2024. 2, 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 84, + 169, + 527, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 169, + 527, + 201 + ], + "spans": [ + { + "bbox": [ + 84, + 169, + 527, + 201 + ], + "type": "text", + "content": "[125] Weiyun Wang, Zhangwei Gao, Lianjie Chen, Zhe Chen, Jinguo Zhu, Xiangyu Zhao, Yangzhou Liu, Yue Cao, Shenglong Ye, Xizhou Zhu, et al. Visualprm: An effective process reward model for multimodal reasoning. arXiv preprint arXiv:2503.10291, 2025. 2, 7, 8, 9" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 84, + 208, + 527, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 208, + 527, + 239 + ], + "spans": [ + { + "bbox": [ + 84, + 208, + 527, + 239 + ], + "type": "text", + "content": "[126] Weiyun Wang, Yiming Ren, Haowen Luo, Tiantong Li, Chenxiang Yan, Zhe Chen, Wenhai Wang, Qingyun Li, Lewei Lu, Xizhou Zhu, et al. The all-seeing project v2: Towards general relation comprehension of the open world. arXiv preprint arXiv:2402.19474, 2024. 6, 12, 13" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 84, + 247, + 527, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 247, + 527, + 278 + ], + "spans": [ + { + "bbox": [ + 84, + 247, + 527, + 278 + ], + "type": "text", + "content": "[127] Weiyun Wang, Min Shi, Qingyun Li, Wenhai Wang, Zhenhang Huang, Linjie Xing, Zhe Chen, Hao Li, Xizhou Zhu, Zhiguo Cao, et al. The all-seeing project: Towards panoptic visual recognition and understanding of the open world. In The International Conference on Learning Representations, 2024. 
6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 84, + 285, + 527, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 285, + 527, + 317 + ], + "spans": [ + { + "bbox": [ + 84, + 285, + 527, + 317 + ], + "type": "text", + "content": "[128] Zirui Wang, Mengzhou Xia, Luxi He, Howard Chen, Yitao Liu, Richard Zhu, Kaiqu Liang, Xindi Wu, Haotian Liu, Sadhika Malladi, et al. Charxiv: Charting gaps in realistic chart understanding in multimodal llms. arXiv preprint arXiv:2406.18521, 2024. 8, 10" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 84, + 324, + 527, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 324, + 527, + 346 + ], + "spans": [ + { + "bbox": [ + 84, + 324, + 527, + 346 + ], + "type": "text", + "content": "[129] Haoning Wu, Dongxu Li, Bei Chen, and Junnan Li. Longvideobench: A benchmark for long-context interleaved video-language understanding. arXiv preprint arXiv:2407.15754, 2024. 8, 14, 15" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 84, + 353, + 527, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 353, + 527, + 384 + ], + "spans": [ + { + "bbox": [ + 84, + 353, + 527, + 384 + ], + "type": "text", + "content": "[130] Zhiyong Wu, Zhenyu Wu, Fangzhi Xu, Yian Wang, Qiushi Sun, Chengyou Jia, Kanzhi Cheng, Zichen Ding, Liheng Chen, Paul Pu Liang, et al. Os-atlas: A foundation action model for generalist gui agents. arXiv preprint arXiv:2410.23218, 2024. 16" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 84, + 392, + 527, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 392, + 527, + 414 + ], + "spans": [ + { + "bbox": [ + 84, + 392, + 527, + 414 + ], + "type": "text", + "content": "[131] Yijia Xiao, Edward Sun, Tianyu Liu, and Wei Wang. Logicvista: Multimodal llm logical reasoning benchmark in visual contexts. arXiv preprint arXiv:2407.04973, 2024. 
8, 9" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 84, + 420, + 527, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 420, + 527, + 443 + ], + "spans": [ + { + "bbox": [ + 84, + 420, + 527, + 443 + ], + "type": "text", + "content": "[132] Yiheng Xu, Zekun Wang, Junli Wang, Dunjie Lu, Tianbao Xie, Amrita Saha, Doyen Sahoo, Tao Yu, and Caiming Xiong. Aguvis: Unified pure vision agents for autonomous gui interaction. 2024. 16" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 84, + 449, + 527, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 449, + 527, + 480 + ], + "spans": [ + { + "bbox": [ + 84, + 449, + 527, + 480 + ], + "type": "text", + "content": "[133] B. Yan, Yi Jiang, Jiannan Wu, D. Wang, Ping Luo, Zehuan Yuan, and Hutchuan Lu. Universal instance perception as object discovery and retrieval. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 13" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 84, + 488, + 527, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 488, + 527, + 510 + ], + "spans": [ + { + "bbox": [ + 84, + 488, + 527, + 510 + ], + "type": "text", + "content": "[134] Jihan Yang, Shusheng Yang, Anjali Gupta, Rilyn Han, Li Fei-Fei, and Saining Xie. Thinking in Space: How Multimodal Large Language Models See, Remember and Recall Spaces. arXiv preprint arXiv:2412.14171, 2024. 16" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 84, + 517, + 527, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 517, + 527, + 540 + ], + "spans": [ + { + "bbox": [ + 84, + 517, + 527, + 540 + ], + "type": "text", + "content": "[135] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 
9, 10, 11, 12, 15" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 84, + 545, + 527, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 545, + 527, + 578 + ], + "spans": [ + { + "bbox": [ + 84, + 545, + 527, + 578 + ], + "type": "text", + "content": "[136] Qinghao Ye, Haiyang Xu, Jiabo Ye, Ming Yan, Haowei Liu, Qi Qian, Ji Zhang, Fei Huang, and Jingren Zhou. mplug-owl2: Revolutionizing multi-modal large language model with modality collaboration. arXiv preprint arXiv:2311.04257, 2023. 1, 14" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 84, + 584, + 527, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 584, + 527, + 625 + ], + "spans": [ + { + "bbox": [ + 84, + 584, + 527, + 625 + ], + "type": "text", + "content": "[137] Kaining Ying, Fanqing Meng, Jin Wang, Zhiqian Li, Han Lin, Yue Yang, Hao Zhang, Wenbo Zhang, Yuqi Lin, Shuo Liu, Jiayi Lei, Quanfeng Lu, Runjian Chen, Peng Xu, Renrui Zhang, Haozhe Zhang, Peng Gao, Yali Wang, Yu Qiao, Ping Luo, Kaipeng Zhang, and Wenqi Shao. Mmt-bench: A comprehensive multimodal benchmark for evaluating large vision-language models towards multitask agi. arXiv preprint arXiv:2404.16006, 2024. 9, 11" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 84, + 633, + 527, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 633, + 527, + 655 + ], + "spans": [ + { + "bbox": [ + 84, + 633, + 527, + 655 + ], + "type": "text", + "content": "[138] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. 
12" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 84, + 662, + 527, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 662, + 527, + 693 + ], + "spans": [ + { + "bbox": [ + 84, + 662, + 527, + 693 + ], + "type": "text", + "content": "[139] Weihao Yu, Zhengyuan Yang, Linfeng Ren, Linjie Li, Jianfeng Wang, Kevin Lin, Chung-Ching Lin, Zicheng Liu, Lijuan Wang, and Xinchao Wang. Mm-vet2: A challenging benchmark to evaluate large multimodal models for integrated capabilities. arXiv preprint arXiv:2408.00765, 2024. 12" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 84, + 700, + 527, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 700, + 527, + 723 + ], + "spans": [ + { + "bbox": [ + 84, + 700, + 527, + 723 + ], + "type": "text", + "content": "[140] Ya-Qi Yu, Minghui Liao, Jiwen Zhang, and Jihao Wu. Texthawk2: A large vision-language model excels in bilingualOCR and grounding with 16x fewer tokens. arXiv preprint arXiv:2410.05261, 2024. 13" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 83, + 72, + 527, + 604 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 85, + 72, + 526, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 72, + 526, + 105 + ], + "spans": [ + { + "bbox": [ + 85, + 72, + 526, + 105 + ], + "type": "text", + "content": "[141] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. 
Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. arXiv preprint arXiv:2311.16502, 2023. 2, 7, 8, 9" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 110, + 526, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 110, + 526, + 142 + ], + "spans": [ + { + "bbox": [ + 83, + 110, + 526, + 142 + ], + "type": "text", + "content": "[142] Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. Hellaswag: Can a machine really finish your sentence? In Proceedings of the Annual Meeting of the Association for Computational Linguistics, pages 4791-4800, 2019. 16" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 84, + 148, + 527, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 148, + 527, + 180 + ], + "spans": [ + { + "bbox": [ + 84, + 148, + 527, + 180 + ], + "type": "text", + "content": "[143] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Dufter, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1.5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 13" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 84, + 186, + 527, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 186, + 527, + 218 + ], + "spans": [ + { + "bbox": [ + 84, + 186, + 527, + 218 + ], + "type": "text", + "content": "[144] Haotian Zhang, Haoxuan You, Philipp Dufter, Bowen Zhang, Chen Chen, Hong-You Chen, Tsu-Jui Fu, William Yang Wang, Shih-Fu Chang, Zhe Gan, et al. Ferret-v2: An improved baseline for referring and grounding with large language models. arXiv preprint arXiv:2404.07973, 2024. 
13" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 84, + 224, + 527, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 224, + 527, + 255 + ], + "spans": [ + { + "bbox": [ + 84, + 224, + 527, + 255 + ], + "type": "text", + "content": "[145] Peiyuan Zhang, Kaichen Zhang, Bo Li, Guangtao Zeng, Jingkang Yang, Yuanhan Zhang, Ziyue Wang, Haoran Tan, Chunyuan Li, and Ziwei Liu. Long context transfer from language to vision. arXiv preprint arXiv:2406.16852, 2024. 16" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 84, + 261, + 527, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 261, + 527, + 294 + ], + "spans": [ + { + "bbox": [ + 84, + 261, + 527, + 294 + ], + "type": "text", + "content": "[146] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Peng Gao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? arXiv preprint arXiv:2403.14624, 2024.8, 9" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 84, + 300, + 527, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 300, + 527, + 331 + ], + "spans": [ + { + "bbox": [ + 84, + 300, + 527, + 331 + ], + "type": "text", + "content": "[147] Renrui Zhang, Xinyu Wei, Dongzhi Jiang, Yichi Zhang, Ziyu Guo, Chengzhuo Tong, Jiaming Liu, Aojun Zhou, Bin Wei, Shanghang Zhang, et al. Mavis: Mathematical visual instruction tuning. arXiv preprint arXiv:2407.08739, 2024.6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 84, + 337, + 527, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 337, + 527, + 360 + ], + "spans": [ + { + "bbox": [ + 84, + 337, + 527, + 360 + ], + "type": "text", + "content": "[148] Tianyu Zhang, Suyuchen Wang, Lu Li, Ge Zhang, Perouz Taslakian, Sai Rajeswar, Jie Fu, Bang Liu, and Yoshua Bengio. Vcr: Visual caption restoration. 
arXiv preprint arXiv:2406.06462, 2024. 8, 10" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 83, + 365, + 527, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 365, + 527, + 388 + ], + "spans": [ + { + "bbox": [ + 83, + 365, + 527, + 388 + ], + "type": "text", + "content": "[149] Xiaotian Zhang, Chunyang Li, Yi Zong, Zhengyu Ying, Liang He, and Xipeng Qiu. Evaluating the performance of large language models on gaokao benchmark. arXiv preprint arXiv:2305.12474, 2023. 16" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 393, + 527, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 393, + 527, + 415 + ], + "spans": [ + { + "bbox": [ + 83, + 393, + 527, + 415 + ], + "type": "text", + "content": "[150] Y Zhang, B Li, H Liu, Y Lee, L Gui, D Fu, J Feng, Z Liu, and C Li. Llava next: A strong zero-shot video understanding model. 2024. 16" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 84, + 421, + 527, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 421, + 527, + 453 + ], + "spans": [ + { + "bbox": [ + 84, + 421, + 527, + 453 + ], + "type": "text", + "content": "[151] Yi-Fan Zhang, Huanyu Zhang, Haochen Tian, Chaoyou Fu, Shuangqing Zhang, Junfei Wu, Feng Li, Kun Wang, Qingsong Wen, Zhang Zhang, et al. Mme-realworld: Could your multimodal llm challenge high-resolution real-world scenarios that are difficult for humans? arXiv preprint arXiv:2408.13257, 2024. 11" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 84, + 459, + 527, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 459, + 527, + 491 + ], + "spans": [ + { + "bbox": [ + 84, + 459, + 527, + 491 + ], + "type": "text", + "content": "[152] Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning. 
arXiv preprint arXiv:2501.07301, 2025. 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 84, + 497, + 527, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 497, + 527, + 529 + ], + "spans": [ + { + "bbox": [ + 84, + 497, + 527, + 529 + ], + "type": "text", + "content": "[153] Bingchen Zhao, Yongshuo Zong, Letian Zhang, and Timothy Hospedales. Benchmarking multi-image understanding in vision and language models: Perception, knowledge, reasoning, and multi-hop reasoning. arXiv preprint arXiv:2406.12742, 2024. 9, 11" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 84, + 535, + 527, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 535, + 527, + 567 + ], + "spans": [ + { + "bbox": [ + 84, + 535, + 527, + 567 + ], + "type": "text", + "content": "[154] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 14, 15" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 84, + 573, + 527, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 573, + 527, + 604 + ], + "spans": [ + { + "bbox": [ + 84, + 573, + 527, + 604 + ], + "type": "text", + "content": "[155] Chengke Zou, Xingang Guo, Rui Yang, Junyu Zhang, Bin Hu, and Huan Zhang. Dynamath: A dynamic visual benchmark for evaluating mathematical reasoning robustness of vision language models. arXiv preprint arXiv:2411.00836, 2024. 
8, 9" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10481/29785fca-1f46-4ab1-92e1-b0b4c9aee15b_content_list.json b/data/2025/2504_10xxx/2504.10481/29785fca-1f46-4ab1-92e1-b0b4c9aee15b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..746d6bec520890d4ffaafec3a9bd9ff87394fad4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/29785fca-1f46-4ab1-92e1-b0b4c9aee15b_content_list.json @@ -0,0 +1,4013 @@ +[ + { + "type": "text", + "text": "Ding Chen $^{1*}$ Qingchen Yu $^{2*}$ Pengyuan Wang $^{2*}$ Wentao Zhang $^{3\\dagger}$", + "bbox": [ + 261, + 226, + 740, + 244 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Bo Tang² Feiyu Xiong² Xinchi Li¹ Minchuan Yang¹ Zhiyu Li²†", + "bbox": [ + 246, + 262, + 753, + 279 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Research Institute of China Telecom, Beijing, China", + "bbox": [ + 320, + 291, + 676, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2 MemTensor (Shanghai) Technology Co., Ltd.", + "bbox": [ + 343, + 306, + 653, + 321 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3 Center for Data Science, Peking University wentao.zhang@pku.edu.cn, lizy@iaar.ac.cn", + "bbox": [ + 326, + 321, + 671, + 349 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 386, + 537, + 401 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "With the release of the o1 model by OpenAI, 
reasoning models adopting slow thinking strategies have gradually emerged. As the responses generated by such models often include complex reasoning, intermediate steps, and self-reflection, existing evaluation methods are often inadequate. They struggle to determine whether the LLM output is truly equivalent to the reference answer, and also have difficulty identifying and extracting the final answer from long, complex responses. To address this issue, we propose xVerify, an efficient answer verifier for reasoning model evaluations. xVerify demonstrates strong capability in equivalence judgment, enabling it to effectively determine whether the answers produced by reasoning models are equivalent to reference answers across various types of objective questions. To train and evaluate xVerify, we construct the VAR dataset by collecting question-answer pairs generated by multiple LLMs across various datasets, leveraging multiple reasoning models and challenging evaluation sets designed specifically for reasoning model assessment. A multi-round annotation process is employed to ensure label accuracy. Based on the VAR dataset, we train multiple xVerify models of different scales. In evaluation experiments conducted on both the test set and generalization set, all xVerify models achieve overall F1 scores and accuracy exceeding $95\\%$ . Notably, the smallest variant, xVerify-0.5B-I, outperforms all evaluation methods except GPT-4o, while xVerify-3B-Ib surpasses GPT-4o in overall performance. These results validate the effectiveness and generalizability of xVerify. 
All resources for xVerify are available at https://github.com/IAAR-Shanghai/xVerify.", + "bbox": [ + 228, + 417, + 767, + 708 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 734, + 313, + 750 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "With the emergence of chain of thought (CoT) prompting [35], researchers began to explicitly encourage LLMs to generate intermediate reasoning steps, thereby enhancing their ability to handle complex tasks. Following this, OpenAI introduced the o1 model [15], which proposed the concepts of slow thinking and scaling at test time. Specifically, the model is trained to output a detailed reasoning process before generating a final answer, significantly improving its performance on complex tasks. Inspired by this paradigm, a variety of reasoning models have emerged, such as DeepSeek-R1 [3] trained with GRPO, OpenAI's o3-mini [26], and QwQ-32B [31]. However, the rise of reasoning models poses substantial challenges for evaluation. Since the outputs of these models often contain", + "bbox": [ + 169, + 765, + 826, + 876 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10481v1 [cs.CL] 14 Apr 2025", + "bbox": [ + 22, + 263, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 189, + 125, + 220, + 150 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "xVerify: Efficient Answer Verifier for Reasoning Model Evaluations", + "bbox": [ + 223, + 128, + 807, + 176 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution. † Corresponding authors", + "bbox": [ + 189, + 885, + 465, + 898 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Preprint. 
Under review.", + "bbox": [ + 171, + 922, + 313, + 936 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "lengthy reasoning processes—potentially including redundant information, intermediate results, and even self-contradictions—it becomes significantly more difficult for evaluation tools to extract the final answer from such responses [2].", + "bbox": [ + 169, + 90, + 823, + 133 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Developing evaluation methods tailored for LLM responses involving complex reasoning has become a key research focus. LLM reasoning is typically categorized into commonsense, logical, multihop, and mathematical reasoning [8]. Existing evaluation methods fall into automatic and human evaluation [2], with automatic evaluation gaining prominence due to its scalability and lower cost. The main automatic approaches for evaluating reasoning models include rule-based evaluation frameworks [13, 5, 27, 9, 25] and LLM-based judgment methods [20, 7, 18]. However, both approaches face limitations in reasoning model evaluation. Rule-based frameworks often struggle to extract final answers from lengthy reasoning traces, rely on strict formatting (e.g., syntactically correct LaTeX), and typically ignore the reasoning process itself—an oversimplification challenged by many researchers [36, 33, 14, 32]. Judge models are usually not optimized for reasoning evaluation and mainly produce qualitative scores or comments [7], making them more suitable for subjective questions. Objective tasks, in contrast, require accurate binary classification. Currently, effective automatic methods specifically designed for evaluating reasoning on objective questions remain lacking.", + "bbox": [ + 169, + 138, + 826, + 335 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these challenges, we introduce xVerify, an efficient LLM-answer verifier tailored for evaluating LLM responses to objective questions. 
xVerify processes the full LLM output, enabling it to accurately identify final answers from complex reasoning traces. It also supports robust equivalence checking, including symbol conversion (e.g., 'alpha' $\\rightarrow$ 'α'), mathematical expression matching, and semantic alignment in natural language. Moreover, it is tolerant of formatting errors such as malformed LaTeX, making it applicable to a wide range of tasks, including math problems, multiple-choice, short-answer, and classification questions. To train and evaluate xVerify, we construct the Verify Answer for Reasoning (VAR) dataset, which includes responses from 19 LLMs across 24 reasoning benchmarks. All labels are verified through multi-round GPT-4o and human review. The dataset covers advanced reasoning models and benchmarks like GPQA, LiveMathBench, and AIME 2024. We fine-tune xVerify on a variety of base models (e.g., Qwen2.5, LLaMA, Gemma 2) and scales (0.5B-32B). Remarkably, even the smallest variant (xVerify-0.5B-I) surpasses existing evaluation methods—including 32B-sized models—on all metrics, while larger variants achieve F1 and accuracy over $95\\%$ on both test and generalization sets.", + "bbox": [ + 169, + 339, + 826, + 534 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The main contributions of this paper are summarized as follows:", + "bbox": [ + 171, + 539, + 594, + 554 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We construct the VAR dataset, which contains answer samples from 19 LLMs across 24 evaluation benchmarks. The dataset is annotated via multiple rounds of GPT-4o and human review, and is designed for training and evaluating judge models for reasoning tasks.", + "- We propose xVerify, an efficient answer verifier for reasoning model evaluations, and release multiple fine-tuned versions of xVerify. 
The checkpoints are publicly available2.", + "- We conduct comprehensive comparative evaluations against multiple existing evaluation frameworks and judge models on both test and generalization datasets, thoroughly validating the effectiveness and applicability of xVerify." + ], + "bbox": [ + 215, + 561, + 823, + 681 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 171, + 700, + 321, + 715 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Evaluation methods have always been a crucial component in the development of LLM [2]. However, the open-ended nature of LLM outputs makes it difficult to apply standardized metrics, limiting the effectiveness of traditional evaluation methods [20]. The rise of reasoning models [26, 3, 31], which often generate lengthy and complex reasoning, further complicates evaluation. For objective tasks, the main challenge is to accurately extract the final answer from the LLM's semi-structured output and compare it with the reference answer. Existing approaches are typically divided into human evaluation and automatic evaluation. While human evaluation offers flexibility, automatic methods are more cost-efficient and consistent [2]. Current automatic methods mainly include rule-based evaluation frameworks and LLM-based judgment methods.", + "bbox": [ + 169, + 729, + 826, + 856 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Rule-based methods are widely used in automatic evaluation frameworks such as LM Eval Harness [5], OpenCompass [27], UltraEval [9], and OpenAI Evalu [25]. 
Tools like Math-Verify [13] also follow", + "bbox": [ + 169, + 861, + 826, + 890 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "$^{2}$ Hugging Face collections: https://huggingface.co/collections/IAAR-Shanghai/xverify", + "bbox": [ + 191, + 896, + 705, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "this approach, extracting final answers using regular expressions (RegEx) and comparing them with reference answers. However, LLM outputs often contain final answers in varied surface forms—e.g., \"alpha\" vs. \"α\", \"A\" vs. \"a\", or \"1000\" vs. \"10³\"—which can be semantically equivalent but textually different. While some tools support limited transformations, they typically handle only LaTeX expressions or simple string patterns, and struggle with basic semantic equivalence like \"one hundred\" vs. \"100\". For reasoning models, the output is usually lengthy and involves complex reasoning steps with intermediate results. This makes it difficult for regular expressions to accurately identify the final answer, causing rule-based approaches to frequently fail in such contexts. Moreover, prior work has shown that LLMs may revise or overturn their initial predictions during extended reasoning processes, exhibiting a kind of self-reflection [32]. At the same time, rule-based methods typically ignore the reasoning process and only evaluate the final answer, which has drawn criticism from many researchers—especially in the context of reasoning models [36, 33, 14]. Thus, rule-based evaluations have limited applicability in reasoning scenarios.", + "bbox": [ + 169, + 90, + 826, + 272 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LLM-based judgment methods use fine-tuned LLMs to evaluate the quality of other LLMs' responses. 
Compared to traditional evaluation methods, they offer greater task adaptability, generate interpretable results, reduce evaluation costs, and can be applied across the LLM lifecycle [20, 7, 18]. For objective questions, these judge models can extract final answers from responses with intermediate reasoning or self-reflection. In recent years, many LLM-based judge models have emerged, including JudgeLM [39], PandaLM [34], Auto-J [21], Prometheus 2 [17], CompassJudger [1], CritiqueLLM [16], and Themis [12]. Judge models typically support pointwise, pairwise, and listwise evaluations [20], and some also serve as reward models in reinforcement learning. However, most are designed to assign scores to LLM outputs, making them more suitable for subjective evaluations like helpfulness, reliability, or relevance. For objective questions that require binary decisions (\"correct\" or \"incorrect\"), these models are less effective. Although scores can be binarized using thresholds, this approach is unreliable, as the models are not explicitly trained for such tasks. Moreover, the current LLM-based critic models and PRMs (Process Reward Models) exhibit subpar performance when detecting errors in long chain-of-thought responses generated by reasoning models [10]. Thus, while judge model holds promise for evaluating reasoning models, they require targeted training.", + "bbox": [ + 169, + 277, + 826, + 486 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In summary, automatic evaluation on objective tasks remains underdeveloped. Rule-based and LLM-based methods each have clear limitations, while human annotation is costly and hard to scale. 
To address these challenges, we propose xVerify, a robust and targeted judge model specifically designed for objective evaluation of LLMs.", + "bbox": [ + 169, + 491, + 826, + 547 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Problem Definition", + "text_level": 1, + "bbox": [ + 171, + 566, + 366, + 580 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To evaluate the correctness of LLM responses to objective questions, the key is to extract the final answer from the response and compare it with the reference answer. We formally define this evaluation task as follows:", + "bbox": [ + 169, + 598, + 823, + 638 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We formalize this task as a 4-tuple $(\\mathrm{Q},\\mathrm{R},\\mathrm{A}_{\\mathrm{ref}},\\mathrm{E})$ , where $\\mathrm{Q} = \\{q_1,q_2,\\dots,q_n\\}$ is the set of questions, $\\mathrm{R} = \\{r_1,r_2,\\dots,r_n\\mid r_i = \\mathcal{W}(q_i)\\}$ is the set of responses generated by an LLM $\\mathcal{W}$ , $\\mathrm{A}_{\\mathrm{ref}} = \\{a_{ref}^{1},\\dots,a_{ref}^{n}\\}$ is the set of reference answers, and $\\mathrm{E}:\\mathrm{Q}\\times \\mathrm{R}\\times \\mathrm{A}_{\\mathrm{ref}}\\to 0,1$ is the evaluation function that returns 1 if the response is correct and 0 otherwise.", + "bbox": [ + 169, + 646, + 826, + 703 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For the stage of extracting the final answer, given a response $r$ to question $q$ , which may include intermediate reasoning and multiple candidate answers, we denote the extracted candidates as $\\mathrm{A}(r)$ . 
To identify the final answer, we define a scoring function $\\mathrm{S} : \\mathrm{A}(r) \\times \\mathrm{Q} \\to \\mathbb{R}$ that measures the relevance or suitability of each candidate $a \\in \\mathrm{A}(r)$ to $q$ , and select the final answer using the extraction function:", + "bbox": [ + 169, + 709, + 825, + 779 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\varepsilon (q, r) = \\arg \\max _ {a \\in \\mathrm {A} (r)} \\mathrm {S} (a, q). \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 800, + 823, + 821 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For the equivalence comparison stage, we define an equivalence function $\\psi : \\mathrm{A}_{\\mathrm{ref}} \\times \\mathrm{A}_{\\mathrm{final}} \\to \\{0,1\\}$ , where $\\psi$ returns 1 if the predicted answer is equivalent to the reference, and 0 otherwise. Since answers may appear in different forms, $\\psi$ integrates results from the following three sub-functions:", + "bbox": [ + 169, + 835, + 826, + 878 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For mathematical expressions, we define a composite normalization function $\\Phi_{\\mathrm{norm}}^{\\mathrm{math}} = \\phi_{\\mathrm{err}} \\circ \\phi_{\\mathrm{syn}} \\circ \\phi_{\\mathrm{alg}} \\circ \\phi_{\\mathrm{dim}}$ , where $\\phi_{\\mathrm{err}}$ repairs minor syntax errors, $\\phi_{\\mathrm{syn}}$ unifies syntactic structures, $\\phi_{\\mathrm{alg}}$ performs", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "algebraic simplification, and $\\phi_{\\mathrm{dim}}$ ensures consistency in physical units. 
By transforming expressions into a canonical form, $\\Phi_{\\mathrm{norm}}^{math}$ enables reliable equivalence comparison:", + "bbox": [ + 169, + 90, + 823, + 122 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\psi_ {m a t h} \\left(a _ {r e f} ^ {m a t h}, a _ {f i n a l} ^ {m a t h}\\right) = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} \\Phi_ {\\text {n o r m}} ^ {m a t h} \\left(a _ {r e f} ^ {m a t h}\\right) = \\Phi_ {\\text {n o r m}} ^ {m a t h} \\left(a _ {f i n a l} ^ {m a t h}\\right), \\\\ 0 & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 274, + 141, + 825, + 176 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For natural language answers, we define a comparison function $\\psi_{\\mathrm{nl}}: \\mathrm{A}_{\\mathrm{ref}}^{\\mathrm{nl}} \\times \\mathrm{A}_{\\mathrm{final}}^{\\mathrm{nl}} \\to \\{0,1\\}$ to assess semantic equivalence. Specifically, we introduce a semantic alignment function $\\phi_{\\mathrm{align}}^{nl}$ to measure the similarity between two textual answers. The equivalence decision is made by comparing the alignment score with a predefined threshold $\\tau$ :", + "bbox": [ + 169, + 193, + 823, + 253 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\psi_ {n l} \\left(a _ {r e f} ^ {n l}, a _ {f i n a l} ^ {n l}\\right) = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} \\phi_ {\\text {a l i g n}} ^ {n l} \\left(a _ {r e f} ^ {n l}, a _ {f i n a l} ^ {n l}\\right) \\geq \\tau , \\\\ 0 & \\text {o t h e r w i s e} \\end{array} \\right. 
\\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 320, + 273, + 825, + 310 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For symbolic representations, we define a composite normalization function $\\Phi_{\\mathrm{norm}}^{sym} = \\phi_{\\mathrm{uni}} \\circ \\phi_{\\mathrm{font}} \\circ \\phi_{\\mathrm{dom}}$ which unifies symbols by applying $\\phi_{\\mathrm{uni}}$ for Unicode normalization, $\\phi_{\\mathrm{font}}$ for aligning font styles, and $\\phi_{\\mathrm{dom}}$ for domain-specific mappings. This produces a standardized form for character-level comparison, and the $\\Phi_{\\mathrm{norm}}^{sym}$ is defined as:", + "bbox": [ + 169, + 325, + 826, + 383 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\psi_ {s y m} \\left(a _ {r e f} ^ {s y m}, a _ {f i n a l} ^ {s y m}\\right) = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} \\Phi_ {\\text {n o r m}} ^ {s y m} \\left(a _ {r e f} ^ {s y m}\\right) = \\Phi_ {\\text {n o r m}} ^ {s y m} \\left(a _ {f i n a l} ^ {s y m}\\right), \\\\ 0 & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 404, + 825, + 445 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Based on the above components, we define a unified equivalence function $\\psi$ to determine whether the final answer $a_{final}$ matches the reference answer $a_{ref}$ across different modalities. 
The definition is:",
+ "bbox": [
+ 169,
+ 459,
+ 823,
+ 489
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\psi \\left(a_{\\mathrm{final}}, a_{\\mathrm{ref}}\\right) = \\left\\{ \\begin{array}{ll} 1, & \\text{if } \\psi_{\\mathrm{math}} \\left(a_{\\mathrm{final}}^{\\mathrm{math}}, a_{\\mathrm{ref}}^{\\mathrm{math}}\\right) = 1 \\\\ & \\quad \\wedge \\psi_{\\mathrm{nl}} \\left(a_{\\mathrm{final}}^{\\mathrm{nl}}, a_{\\mathrm{ref}}^{\\mathrm{nl}}\\right) = 1 \\\\ & \\quad \\wedge \\psi_{\\mathrm{sym}} \\left(a_{\\mathrm{final}}^{\\mathrm{sym}}, a_{\\mathrm{ref}}^{\\mathrm{sym}}\\right) = 1; \\\\ 0, & \\text{otherwise} \\end{array} \\right. \\tag{5}\n$$\n",
+ "text_format": "latex",
+ "bbox": [
+ 310,
+ 508,
+ 825,
+ 579
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "Here, $a_{final}^{math}, a_{final}^{nl}$ , and $a_{final}^{sym}$ represent the mathematical, natural language, and symbolic parts of the final answer, respectively, and similarly for $a_{ref}$ . 
This allows for equivalence checking in both unimodal and multimodal settings.", + "bbox": [ + 169, + 595, + 823, + 641 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To summarize, the overall evaluation function $\\mathrm{E}$ is defined as:", + "bbox": [ + 169, + 646, + 578, + 659 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {E} (q, r, a _ {r e f}) = \\psi (\\varepsilon (q, r), a _ {r e f}) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 387, + 684, + 825, + 702 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $q$ is the objective question, $r$ is the response generated by the LLM, and $a_{ref}$ is the corresponding reference answer.", + "bbox": [ + 169, + 715, + 826, + 744 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4 Methodology", + "text_level": 1, + "bbox": [ + 169, + 767, + 316, + 785 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The xVerify training and evaluation pipeline includes three main stages: collecting LLM responses, VAR dataset construction, and xVerify judge pipeline (see Figure 1). We first gather question-response pairs from various LLMs across four types of objective questions, including complex, reasoning-intensive examples. To ensure accurate labels, we employ multiple rounds of annotation and rechecking using both GPT-4o and human annotators. We also apply data augmentation to increase the dataset's diversity and complexity. Finally, we train xVerify models of different sizes on the VAR dataset to evaluate long, multi-step answers—cases that are often difficult for existing evaluation methods. 
Section 4.1 details the dataset construction, and Section 4.2 describes the training process.", + "bbox": [ + 169, + 800, + 826, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/6d439d7f1c4119a53063123e4a1a272a2937ccfe8b265eebde7d815d650c02cb.jpg", + "image_caption": [ + "Figure 1: Framework of xVerify: (1) Collecting LLM Responses: aggregate responses from multiple LLMs across datasets covering four question types. (2) VAR Dataset Construction: employ GPT-4o and human annotators for labeling and rechecking, and use data augmentation to refine the dataset. (3) xVerify Judge Pipeline: accurately evaluate multi-component answers from reasoning models on challenging questions." + ], + "image_footnote": [], + "bbox": [ + 222, + 85, + 777, + 356 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 VAR Dataset", + "text_level": 1, + "bbox": [ + 171, + 452, + 303, + 465 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "xVerify is designed to assess the correctness of reasoning models' responses on objective questions. However, current judge models are mostly trained on tasks such as scoring or reviewing, and reasoning models with lengthy responses have only recently emerged. As a result, there is currently no suitable dataset for training xVerify. To better train and evaluate xVerify, we constructed a dedicated dataset named Verify Answer for Reasoning (VAR). 
Examples from the VAR dataset are provided in Appendix B.3.", + "bbox": [ + 169, + 479, + 823, + 564 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1.1 LLM Response Generation", + "text_level": 1, + "bbox": [ + 171, + 582, + 415, + 597 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To ensure the diversity and coverage of the dataset, we selected 19 mainstream LLMs and 24 frequently used multilingual datasets to generate and collect responses. To better simulate the answering patterns of reasoning models in common evaluation scenarios, the chosen LLMs include recently released models such as the DeepSeek-R1-Distill series [3] and QwQ-32B [31]. Most of the other LLMs also support context lengths exceeding $32k$ tokens, enabling them to produce answers with extended reasoning chains. The selected datasets include high-difficulty benchmarks commonly used for evaluating reasoning models, such as GPQA [28], AIME 2024 [24], MATH [11], and LiveCodeBench [23], which typically require multi-step reasoning and computation to solve. During data generation, we also retained some extremely long responses, such as those exceeding 6k characters in length. Detailed information on all LLMs and datasets is provided in Appendix A.", + "bbox": [ + 169, + 607, + 826, + 746 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To train and evaluate xVerify more effectively, we grouped the 24 datasets into four types based on question and answer formats: multiple choice, math, short answer, and classification. 
Multiple choice questions offer several labeled options; math includes questions where answers are mathematical expressions (e.g., numbers, equations), including mathematics and physics problems; short answer questions expect brief natural language responses like names or dates, with no strict format constraints; classification tasks involve selecting the correct label, such as for sentiment or topic classification.", + "bbox": [ + 169, + 752, + 823, + 835 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To reflect realistic evaluation settings and generate a diverse set of Q&A samples, we designed multiple prompt templates for guiding the LLMs in response generation. The prompt configurations vary along several dimensions: 0-shot vs. 5-shot, with or without CoT, and with or without answer format restrictions (restrict), resulting in eight distinct prompt types. Details of all prompt templates are provided in Appendix D.1.", + "bbox": [ + 169, + 842, + 823, + 912 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In total, we generated 191,600 Q&A samples using the 19 LLMs and 24 evaluation sets, providing a rich and diverse sample pool for constructing the dataset.", + "bbox": [ + 169, + 90, + 823, + 122 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.2 Dataset Partitioning", + "text_level": 1, + "bbox": [ + 171, + 133, + 366, + 148 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Based on the previously collected sample pool, we constructed the training, test, and generalization sets through filtering and preprocessing.", + "bbox": [ + 169, + 157, + 823, + 186 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The training and test sets are used to train and evaluate the xVerify model. Both are sampled from the same pool, sharing similar distributions. 
Specifically, they include samples generated by 15 LLMs across 17 evaluation sets, covering the four previously mentioned question types. The training set contains 36,941 samples, and the test set includes 5,194 samples.", + "bbox": [ + 169, + 191, + 823, + 248 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The generalization set complements the test set by evaluating xVerify's ability to handle more diverse and challenging distributions, reflecting real-world scenarios. It consists of 5,366 samples from 7 evaluation sets not used in the training or test sets, while still spanning all four question types. These samples are generated by 19 LLMs, including 4 models not seen in training or testing, such as the reasoning model QwQ-32B, resulting in greater diversity and distribution shift.", + "bbox": [ + 169, + 253, + 825, + 325 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Section 4.1.4 introduces our data augmentation strategy, which adds more challenging samples to all three sets. Detailed dataset statistics are provided in Appendix B.1.", + "bbox": [ + 169, + 329, + 823, + 359 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.3 Data Annotations", + "text_level": 1, + "bbox": [ + 171, + 372, + 348, + 386 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To ensure the accuracy of xVerify's training and evaluation, we conducted multiple rounds of automatic and manual annotation across the three datasets. Specifically, we used GPT-4o to perform two rounds of annotation for all samples in the datasets, utilizing two distinct prompt templates (details provided in Appendix D.2) to improve annotation confidence [33, 22]. Given the large size of the training set, we only applied manual annotation to the more challenging math problems and to samples where the two rounds of GPT-4o annotations disagreed. 
In contrast, for the test and generalization sets, we manually annotated all samples, resulting in a three-round annotation process to maximize label reliability. Details of the manual annotation process are provided in Appendix B.2.", + "bbox": [ + 169, + 395, + 826, + 507 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.4 Data Augmentation", + "text_level": 1, + "bbox": [ + 171, + 521, + 361, + 536 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/c7b64161e6bad9b6a55e1d24b6081bbc80d1590623a46a0a5887f61e520e322d.jpg", + "image_caption": [ + "Figure 2: Data Augmentation Pipelines: (1) transformation of multiple-choice options through numbering conversion and noise injection, (2) diversification of mathematical answers via equivalent expression generation, and (3) final answer sentence transformation using prompt rephrasing, symbol wrapping, and gap token insertion." + ], + "image_footnote": [], + "bbox": [ + 225, + 547, + 777, + 739 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To further enhance the diversity and robustness of the dataset, we designed a series of data augmentation strategies (illustrated in Figure 2) to better simulate real-world evaluation settings and improve the model's tolerance to varied answer formats.", + "bbox": [ + 169, + 806, + 826, + 849 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For multiple-choice questions, we applied two types of augmentations: option index transformation and noise injection. 
The former converts alphabetical labels to Arabic or Roman numerals, while the latter randomly adds or removes irrelevant distractor options without changing the original question intent, thereby increasing structural complexity.", + "bbox": [ + 169, + 854, + 823, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For math problems, we used two approaches: augmentation based on reference answers and LLM responses. In the first approach, we generated 3-5 mathematically equivalent expressions of each reference answer through symbolic and formal transformations, then created new samples accordingly. In the second, we applied the same transformation logic to the final answers in LLM responses, enriching the dataset with varied mathematical formats and helping the model learn equivalence across symbolic expressions.", + "bbox": [ + 169, + 90, + 823, + 175 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We also augmented the final answer statements. Specifically, we extracted answer-bearing sentences from responses generated using restrict prompts, and applied over 1,000 transformation patterns. These included: 20 variations of prompt rephrasing (e.g., \"The answer is B\" $\\rightarrow$ \"The most appropriate answer is B\"), 18 symbolic wrappers (e.g., wrapping B as $B$ ), and 5 forms of delimiter insertions (e.g., adding a colon or space before the answer). This improved diversity in answer formats and reduced overfitting to specific templates.", + "bbox": [ + 169, + 181, + 826, + 267 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Together, these strategies expanded the expressive space of the dataset while preserving semantic consistency, offering richer and more challenging training signals for xVerify. 
After augmentation, the sizes of the training, test, and generalization sets increased to 43,204, 6,122, and 6,468 samples respectively. Full dataset details are provided in Appendix B.1. The augmentation of math problems primarily relied on GPT-4o; prompt templates are listed in Appendix D.3.", + "bbox": [ + 169, + 273, + 823, + 345 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Model Training", + "text_level": 1, + "bbox": [ + 171, + 362, + 321, + 378 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We trained 14 models with different parameter sizes and architectures using the training set from the VAR dataset. Specifically, we utilized the LLaMA-Factory framework [38] and QLoRA technique [4] for model training. Based on extensive experimentation, we set the number of epochs to 1 and selected a learning rate of 1e-4 as the optimal configuration, with other hyperparameters detailed in Appendix C.1. Many researchers have pointed out potential bias in using LLMs as judge models, where models from the same family tend to receive higher ratings [19]. To thoroughly evaluate the generalization capability of the xVerify method, we trained 14 models with varying parameter sizes and architectures. These models ranged from 0.5B to 32B parameters and included five different families, such as LLaMA 3 [6], Qwen2.5 [37], and Gemma 2 [30]. Details of the models used are provided in Appendix C.2.", + "bbox": [ + 169, + 388, + 823, + 527 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5 Experiments", + "text_level": 1, + "bbox": [ + 171, + 550, + 313, + 568 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we will present the configuration, results, and detailed analysis of the xVerify model evaluation experiments. 
First, we will outline the experimental setup:",
+ "bbox": [
+ 169,
+ 584,
+ 823,
+ 613
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "list",
+ "sub_type": "text",
+ "list_items": [
+ "- Datasets: The datasets used in the evaluation experiments are the test set and generalization set from the VAR dataset. The test set is used to evaluate the xVerify model's performance, while the generalization set supplements the test set by simulating real-world scenarios with a broader sample distribution to assess the model's generalization ability.",
+ "- Metrics: The evaluation mainly uses accuracy and F1 score on both the test and generalization sets. Accuracy shows the model's overall performance, while the F1 score combines precision and recall for a more complete perspective.",
+ "- Baselines: There are two types of baselines: evaluation frameworks and judge models. The evaluation frameworks include DeepSeek-Math [29], LM Eval Harness [5], Math-Verify [13], OpenAI Evals [25], OpenCompass [27], and UltraEval [9]. The judge models include PandaLM [34], Auto-J [21], Prometheus 2 [17], JudgeLM [39], and CompassJudger [1]. In addition, GPT-4o is also used as a judge model with two strategies: one with CoT and one without. The prompts for the judge model and xVerify are provided in Appendix D.4 and Appendix D.5."
+ ],
+ "bbox": [
+ 215,
+ 627,
+ 823,
+ 837
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "text",
+ "text": "Test Set Evaluation Results. We evaluated all evaluation frameworks, judge models, and the xVerify model on the VAR test set (see Table 1). 
Overall, the xVerify model outperforms all evaluation frameworks and judge models, including GPT-4o, with the best and second-best values in each column appearing for the xVerify model.", + "bbox": [ + 169, + 854, + 823, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/f9faf40a1f834a9f752075ed85d7300770dd78771941ebd3aaf9e3b87f121ceb.jpg", + "table_caption": [ + "Table 1: Evaluation Accuracy Results on the Test Set. \"-\" indicates that the evaluation method is not applicable to the problem type. The best performance in each column will be shown in bold, and the second-best performance will be underlined." + ], + "table_footnote": [], + "table_body": "
Method TypeMethodMultiple ChoiceMathShort AnswerClassificationOverall
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
Evaluation FrameworkDeepSeek Math Verify70.77%75.17%78.34%84.30%----74.90%52.52%
LM Eval Harness58.44%68.19%25.16%28.27%53.41%44.51%72.35%66.94%47.67%48.32%
Math-Verify5.88%53.76%82.55%86.70%42.27%71.91%0.00%29.66%45.64%65.91%
OpenAI Simple Evals23.61%28.02%66.79%76.88%42.23%55.32%73.29%67.87%51.17%58.10%
OpenCompass68.11%72.52%79.25%84.73%----74.18%79.64%
UltraEval17.34%18.04%8.88%56.89%----13.95%40.71%
Judge ModelPandaLM-7B-v14.26%8.12%16.78%14.46%23.47%17.72%25.32%16.79%16.40%13.72%
Auto-J-Bilingual-6B52.85%67.71%40.76%65.21%67.22%79.60%74.86%71.37%57.04%69.59%
Auto-J-13B40.00%63.20%26.32%60.62%64.41%78.22%86.04%82.60%53.38%68.13%
Prometheus-7B-v2.075.76%75.41%74.20%74.35%70.95%74.59%84.80%77.03%76.50%75.11%
Prometheus-8x7B-v2.071.26%68.61%71.99%66.92%76.24%77.70%83.27%77.65%74.57%71.12%
JudgeLM-7B-v1.056.53%42.57%46.09%34.58%60.33%50.56%83.89%73.22%59.02%45.90%
JudgeLM-13B-v1.056.81%48.89%58.39%59.46%77.32%79.52%95.63%93.82%68.57%65.83%
JudgeLM-33B-v1.042.86%43.24%44.82%46.03%57.86%62.23%73.42%67.56%52.00%51.75%
CompassJudger-1-1.5B49.95%35.54%61.66%48.78%57.36%46.93%82.51%70.96%61.94%48.35%
CompassJudger-1-7B70.05%62.78%66.62%58.86%67.47%65.08%92.99%89.50%72.72%65.96%
CompassJudger-1-14B58.94%44.62%55.09%40.76%59.66%52.90%90.87%86.61%63.22%51.37%
CompassJudger-1-32B95.09%95.37%84.11%84.30%94.95%96.11%98.45%97.84%91.67%91.69%
GPT-4o as Judge96.61%96.75%95.27%95.80%95.01%96.20%98.14%97.43%96.25%96.39%
GPT-4o as Judge (CoT)97.10%97.23%95.41%95.88%95.63%96.63%99.56%99.38%96.85%96.95%
xVerifyxVerify-0.5B-I97.78%97.90%93.74%94.64%96.72%97.49%99.71%99.59%96.69%96.85%
xVerify-3B-Ib97.31%97.41%95.65%96.18%96.38%97.23%99.78%99.69%97.17%97.27%
xVerify-7B-I97.75%97.84%95.94%96.44%96.51%97.32%99.78%99.69%97.41%97.50%
xVerify-9B-I97.43%97.53%95.75%96.27%96.06%96.97%99.78%99.69%97.19%97.29%
xVerify-14B-Ia97.49%97.59%95.73%96.22%95.41%96.46%99.63%99.49%97.06%97.16%
xVerify-32B-I97.81%97.90%95.88%96.31%96.18%97.06%99.71%99.59%97.32%97.40%
", + "bbox": [ + 174, + 137, + 823, + 405 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Among the evaluation frameworks, the best performers were DeepSeek Math Verify and OpenCompass, but neither achieved an F1 score nor accuracy exceeding $80\\%$ . Some evaluation frameworks were also not suitable for certain question types, which is an inherent limitation of rule-based methods—strong in specificity but limited in applicability. For instance, OpenCompass was completely unsuitable for short answer and classification questions. Additionally, the long reasoning processes generated by reasoning models made it difficult for evaluation frameworks to extract final answers, lowering their overall performance.", + "bbox": [ + 169, + 441, + 826, + 539 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Among judge models, GPT-4o and CompassJudger showed the best overall performance. The CompassJudger-1-32B model achieved F1 score and accuracy of $91.67\\%$ and $91.69\\%$ , respectively. However, the model performed poorly on math questions, with both F1 score and accuracy below $85\\%$ , indicating that it handles simpler questions well but struggles with formula equivalence in math problems. Furthermore, only the 32B version of this judge model achieved over $90\\%$ F1 score and accuracy, while smaller models performed below $80\\%$ . Therefore, the performance of CompassJudger-1-32B is more a result of the base model's capabilities rather than the subsequent training. For example, the smallest xVerify-0.5B-I model outperforms CompassJudger-1-32B across the board, indicating that the VAR training set significantly improves model evaluation performance. GPT-4o's overall performance is very close to xVerify, but the improvement after using CoT is small, with token consumption nearly doubling. 
Specifically, GPT-4o as Judge evaluated the entire test set at a cost of $13.09, while GPT-4o as Judge (CoT) cost $20.15 (using the OpenAI API, charged by token count).", + "bbox": [ + 169, + 544, + 826, + 726 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In contrast, even the smallest xVerify-0.5B-I model outperforms all methods except GPT-4o as Judge (CoT) in overall performance, and the xVerify-3B-Ib model surpasses all others in every evaluation metric. Moreover, for more difficult math questions, all xVerify models except xVerify-0.5B-I exceeded $95\\%$ performance. We also found that the performance of the xVerify model improves as the parameter size increases, but slightly decreases after exceeding 7B parameters, likely due to overfitting on the VAR training set, which is sufficiently large for smaller models.", + "bbox": [ + 169, + 729, + 828, + 816 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Generalization Set Evaluation Results. To better assess the performance of xVerify on a broader sample distribution, we evaluated all methods on the VAR generalization set, as shown in Table 2. On the generalization set, the xVerify model showed a slight decrease in overall performance. However, the drop in both F1 score and accuracy was less than $1.5\\%$ , while other methods showed mixed results. 
Overall, the xVerify model still outperformed all other methods, indicating that although", + "bbox": [ + 169, + 842, + 828, + 912 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "overfitting exists in xVerify, it is limited and the model maintains strong generalization ability on samples outside the training set distribution.", + "bbox": [ + 171, + 90, + 823, + 119 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/9692347bd77c26f19e069abddee6ccb60a7779865398c8c3609ace614cb755fb.jpg", + "table_caption": [ + "Table 2: Evaluation Accuracy Results on the Generalization Set. \"--\" indicates that the evaluation method is not applicable to the problem type. The best performance in each column will be shown in bold, and the second-best performance will be underlined." + ], + "table_footnote": [], + "table_body": "
Method TypeMethodMultiple ChoiceMathShort AnswerClassificationOverall
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
Evaluation FrameworkDeepSeek Math Verify72.90%73.39%11.69%79.83%----60.57%44.42%
LM Eval Harness61.60%65.37%7.03%18.48%58.22%45.09%92.06%88.21%55.81%51.30%
Math-Verify5.19%45.10%64.18%87.68%9.12%52.75%0.00%24.59%16.10%55.53%
OpenAI Simple Evals28.72%29.23%24.31%78.90%58.33%59.58%94.39%91.62%57.99%63.36%
OpenCompass71.64%71.44%47.22%84.39%----65.74%78.18%
UltraEval16.29%15.31%13.55%78.39%----15.71%48.13%
Judge ModelPandaLM-7B-v14.28%7.85%9.91%15.97%45.81%31.43%36.23%25.99%23.74%19.14%
Auto-J-Bilingual-6B52.07%60.75%10.56%74.79%85.16%86.76%84.90%79.91%67.20%74.57%
Auto-J-13B34.87%52.78%9.86%76.54%85.12%86.97%77.67%71.99%60.43%71.35%
Prometheus-7B-v2.076.67%73.66%49.08%71.46%81.52%81.32%79.59%71.92%73.85%74.35%
Prometheus-8x7B-v2.074.13%68.60%49.48%60.27%87.15%86.13%84.70%77.19%74.51%71.69%
JudgeLM-7B-v1.060.22%45.71%12.71%15.40%72.15%62.51%86.11%76.18%59.11%46.38%
JudgeLM-13B-v1.065.39%57.80%21.61%44.87%86.11%84.53%91.78%86.89%69.18%65.63%
JudgeLM-33B-v1.046.99%45.10%20.31%39.99%71.34%66.69%41.92%33.36%46.06%46.01%
CompassJudger-1-1.5B55.75%40.87%34.53%33.62%63.93%51.57%84.49%73.93%60.01%47.65%
CompassJudger-1-7B74.31%65.20%38.27%39.89%88.99%88.15%93.29%89.29%73.47%67.47%
CompassJudger-1-14B63.65%49.50%27.63%21.20%73.61%66.48%88.97%81.92%63.10%51.21%
CompassJudger-1-32B92.93%92.32%72.05%84.91%96.81%96.86%98.05%97.05%91.90%92.04%
GPT-4o as Judge95.86%95.38%87.91%94.76%97.46%97.49%98.67%97.98%96.03%96.18%
GPT-4o as Judge (CoT)95.44%94.88%88.34%94.71%97.39%97.42%98.36%97.52%95.79%95.92%
xVerifyxVerify-0.5B-I96.49%96.10%80.00%91.94%96.95%97.00%99.03%98.53%95.29%95.53%
xVerify-3B-Ib96.21%95.71%86.20%94.15%97.60%97.63%99.03%98.53%96.08%96.23%
xVerify-7B-I96.16%95.66%87.86%94.87%97.45%97.49%98.93%98.37%96.22%96.37%
xVerify-9B-I96.06%95.55%87.47%94.76%97.53%97.56%99.13%98.68%96.23%96.38%
xVerify-14B-Ia96.11%95.60%90.20%95.74%97.32%97.35%99.13%98.68%96.53%96.65%
xVerify-32B-I96.22%95.71%90.09%95.59%97.32%97.35%99.03%98.53%96.50%96.60%
", + "bbox": [ + 174, + 183, + 823, + 450 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Specifically, the overall F1 score and accuracy of all evaluation frameworks remained below $80\\%$ with only OpenCompass achieving an overall accuracy above $70\\%$ . This indicates that rule-based evaluation frameworks have significant limitations in generalization performance, struggling to effectively handle the diverse answers and evaluation sets from LLMs.", + "bbox": [ + 169, + 464, + 826, + 521 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Among the judge models, the best-performing ones remained GPT-4o and CompassJudger. However, all judge models except GPT-4o had an F1 score below $75\\%$ on math questions, with most models scoring below $50\\%$ , indicating that judge models almost entirely fail in evaluating more diverse and complex math problems. GPT-4o as Judge and GPT-4o as Judge (CoT) also failed to achieve an F1 score above $90\\%$ on math problems, suggesting that the math samples in the generalization set indeed present challenges for evaluation methods. Furthermore, GPT-4o's performance did not improve after using CoT; instead, it showed a slight decline. This suggests that in broader scenarios, CoT-based prompt engineering methods do not effectively improve GPT-4o's performance as a judge model, and model fine-tuning may be a better option.", + "bbox": [ + 169, + 526, + 825, + 652 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In contrast, the xVerify-0.5B-I model outperformed all evaluation methods except GPT-4o, and the xVerify-3B-Ib model outperformed both CoT-based GPT-4o methods. For more difficult math problems, the F1 score and accuracy of the xVerify-14B-Ia and xVerify-32B-I models exceeded $90\\%$ . Additionally, we observed that as the parameter size of the xVerify model increased, the performance drop on the generalization set decreased. 
For example, the accuracy drop for xVerify-0.5B-I was $1.33\\%$ , $0.91\\%$ for xVerify-9B-I, and $0.80\\%$ for xVerify-32B-I, suggesting that larger xVerify models exhibit stronger generalization performance.", + "bbox": [ + 169, + 657, + 826, + 755 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Furthermore, we comprehensively evaluated the performance of 14 x Verify models on both the test and generalization sets, and tested the computational efficiency of all x Verify and judge models, along with the evaluation cost of GPT-4o as a judge model. The results showed that x Verify models outperform other judge models in both usage cost and evaluation efficiency. Full experimental results can be found in Appendix E.", + "bbox": [ + 169, + 761, + 826, + 830 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 851, + 302, + 867 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this paper, we propose an efficient answer verifier for reasoning model evaluations, named xVerify, which can effectively assess the correctness of long reasoning responses generated by", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 491, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "reasoning models on various difficult objective questions. To train and evaluate the xVerify model, we constructed the VAR dataset based on several popular LLMs and evaluation sets. This dataset primarily collects long reasoning responses generated by reasoning models on challenging questions, and multiple rounds of labeling and verification were conducted using GPT-4o and human annotators. Ultimately, we trained multiple xVerify models of varying specifications based on the VAR dataset and performed comparative evaluations with several evaluation frameworks and judge models on both the test and generalization sets. 
The experimental results show that even the smallest xVerify-0.5B-I model outperforms all methods except GPT-4o, and larger xVerify models surpass all other methods, demonstrating the effectiveness and generalization ability of xVerify.", + "bbox": [ + 169, + 90, + 826, + 217 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 234, + 267, + 250 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Maosong Cao, Alexander Lam, Haodong Duan, Hongwei Liu, Songyang Zhang, and Kai Chen. Compassjudger-1: All-in-one judge model helps model evaluation and evolution. arXiv preprint arXiv:2410.16256, 2024.", + "[2] Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, et al. A survey on evaluation of large language models. ACM transactions on intelligent systems and technology, 15(3):1-45, 2024.", + "[3] DeepSeek-AI, Daya Guo, Dejian Yang, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025.", + "[4] Tim Dettmers, Artidoro Pagnoni, Ari Holtzman, and Luke Zettlemoyer. Qlora: Efficient finetuning of quantized llms. Advances in neural information processing systems, 36:10088-10115, 2023.", + "[5] Leo Gao, Jonathan Tow, Baber Abbasi, Stella Biderman, Sid Black, Anthony DiPofi, Charles Foster, Laurence Golding, Jeffrey Hsu, Alain Le Noac'h, Haonan Li, Kyle McDonell, Niklas Muennighoff, Chris Ociepa, Jason Phang, Laria Reynolds, Hailey Schoelkopf, Aviya Skowron, Lintang Sutawika, Eric Tang, Anish Thite, Ben Wang, Kevin Wang, and Andy Zou. A framework for few-shot language model evaluation, September 2021.", + "[6] Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, et al. 
The llama 3 herd of models, 2024.", + "[7] Jiawei Gu, Xuhui Jiang, Zhichao Shi, Hexiang Tan, Xuehao Zhai, Chengjin Xu, Wei Li, Yinghan Shen, Shengjie Ma, Honghao Liu, Saizhuo Wang, Kun Zhang, Yuzhuo Wang, Wen Gao, Lionel Ni, and Jian Guo. A survey on llm-as-a-judge, 2025.", + "[8] Zishan Guo, Renren Jin, Chuang Liu, Yufei Huang, Dan Shi, Supryadi, Linhao Yu, Yan Liu, Jiaxuan Li, Bojian Xiong, and Deyi Xiong. Evaluating large language models: A comprehensive survey, 2023.", + "[9] Chaoqun He, Renjie Luo, Shengding Hu, Yuanqian Zhao, Jie Zhou, Hanghao Wu, Jiajie Zhang, Xu Han, Zhiyuan Liu, and Maosong Sun. Ultraeval: A lightweight platform for flexible and comprehensive evaluation for llms. arXiv preprint arXiv:2404.07584, 2024.", + "[10] Yancheng He, Shilong Li, Jiaheng Liu, Weixun Wang, Xingyuan Bu, Ge Zhang, Zhongyuan Peng, Zhaoxiang Zhang, Zhicheng Zheng, Wenbo Su, and Bo Zheng. Can large language models detect errors in long chain-of-thought reasoning?, 2025.", + "[11] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. In J. Vanschoeren and S. Yeung, editors, Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, volume 1, 2021.", + "[12] Xinyu Hu, Li Lin, Mingqi Gao, Xunjian Yin, and Xiaojun Wan. Themis: A reference-free nlg evaluation language model with flexibility and interpretability. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 15924-15951, 2024.", + "[13] Greg Gandenberger Hynek Kydlíček. GitHub - huggingface/Math-Verify: A robust mathematical expression evaluation system designed for assessing Large Language Model outputs in mathematical tasks., 2024." 
+ ], + "bbox": [ + 173, + 258, + 826, + 910 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Alon Jacovi, Yonatan Bitton, Bernd Bohnet, Jonathan Herzig, Or Honovich, Michael Tseng, Michael Collins, Roee Aharoni, and Mor Geva. A chain-of-thought is as strong as its weakest link: A benchmark for verifiers of reasoning chains. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 4615–4634, Bangkok, Thailand, August 2024. Association for Computational Linguistics.", + "[15] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024.", + "[16] Pei Ke, Bosi Wen, Zhuoer Feng, Xiao Liu, Xuanyu Lei, Jiale Cheng, Shengyuan Wang, Aohan Zeng, Yuxiao Dong, Hongning Wang, Jie Tang, and Minlie Huang. Critiquellm: Towards an informative critique generation model for evaluation of large language model generation. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics, 2024.", + "[17] Seungone Kim, Juyoung Suk, Shayne Longpre, Bill Yuchen Lin, Jamin Shin, Sean Welleck, Graham Neubig, Moontae Lee, Kyungjae Lee, and Minjoon Seo. *Prometheus* 2: An open source language model specialized in evaluating other language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, *Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing*, pages 4334–4353, Miami, Florida, USA, November 2024. 
Association for Computational Linguistics.", + "[18] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, Kai Shu, Lu Cheng, and Huan Liu. From generation to judgment: Opportunities and challenges of llm-as-a-judge, 2025.", + "[19] Dawei Li, Renliang Sun, Yue Huang, Ming Zhong, Bohan Jiang, Jiawei Han, Xiangliang Zhang, Wei Wang, and Huan Liu. Preference leakage: A contamination problem in llm-as-a-judge, 2025.", + "[20] Haitao Li, Qian Dong, Junjie Chen, Huixue Su, Yujia Zhou, Qingyao Ai, Ziyi Ye, and Yiqun Liu. Llms-as-judges: A comprehensive survey on llm-based evaluation methods, 2024.", + "[21] Junlong Li, Shichao Sun, Weizhe Yuan, Run-Ze Fan, hai zhao, and Pengfei Liu. Generative judge for evaluating alignment. In The Twelfth International Conference on Learning Representations, 2024.", + "[22] Xun Liang, Shichao Song, Zifan Zheng, Hanyu Wang, Qingchen Yu, Xunkai Li, Rong-Hua Li, Yi Wang, Zhonghao Wang, Feiyu Xiong, and Zhiyu Li. Internal consistency and self-feedback in large language models: A survey, 2024.", + "[23] Junnan Liu, Hongwei Liu, Linchen Xiao, Ziyi Wang, Kuikun Liu, Songyang Gao, Wenwei Zhang, Songyang Zhang, and Kai Chen. Are your llms capable of stable reasoning? arXiv preprint arXiv:2412.13147, 2024.", + "[24] MAA. American invitational mathematics examination - aide. American Invitational Mathematics Examination - AIME 2024, February 2024.", + "[25] OpenAI. GitHub - openai/evals: Evals is a framework for evaluating LLMs and LLM systems, and an open-source registry of benchmarks., 2024.", + "[26] OpenAI. Openai o3-mini, 2025.", + "[27] OpenMMLab. Opencompass: A universal evaluation platform for foundation models. https://github.com/open-compass/opencompass, 2023.", + "[28] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. 
GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, 2024.", + "[29] Zhihong Shao, Peiyi Wang, Qihao Zhu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models, 2024." + ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[30] Gemma Team, Morgane Riviere, Shreya Pathak, et al. Gemma 2: Improving open language models at a practical size, 2024.", + "[31] Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025.", + "[32] Xinpeng Wang, Bolei Ma, Chengzhi Hu, Leon Weber-Genzel, Paul Röttger, Frauke Kreuter, Dirk Hovy, and Barbara Plank. \"my answer is C\": First-token probabilities do not match text answers in instruction-tuned language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 7407–7416, Bangkok, Thailand, August 2024. Association for Computational Linguistics.", + "[33] Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023.", + "[34] Yidong Wang, Zhuohao Yu, Zhengran Zeng, Linyi Yang, Cunxiang Wang, Hao Chen, Chaoya Jiang, Rui Xie, Jindong Wang, Xing Xie, Wei Ye, Shikun Zhang, and Yue Zhang. Pandalm: An automatic evaluation benchmark for llm instruction tuning optimization. 2024.", + "[35] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian richter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. 
Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 24824-24837. Curran Associates, Inc., 2022.", + "[36] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian richter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 24824-24837. Curran Associates, Inc., 2022.", + "[37] An Yang, Baosong Yang, Beichen Zhang, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024.", + "[38] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. Llamafactory: Unified efficient fine-tuning of $100+$ language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), Bangkok, Thailand, 2024. Association for Computational Linguistics.", + "[39] Lianghui Zhu, Xinggang Wang, and Xinlong Wang. JudgeLM: Fine-tuned large language models are scalable judges. In The Thirteenth International Conference on Learning Representations, 2025." 
+ ], + "bbox": [ + 173, + 90, + 826, + 650 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Appendices", + "text_level": 1, + "bbox": [ + 171, + 87, + 341, + 116 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A Datasets and Models 14", + "B VAR Dataset Details 14" + ], + "bbox": [ + 173, + 147, + 825, + 195 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "B.1 Details of Training, Test, and Generalization Sets 15", + "B.2 Details of Human Annotation 19", + "B.3 Examples from the VAR Dataset 21" + ], + "bbox": [ + 196, + 200, + 823, + 258 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C Model Training Details 22", + "bbox": [ + 173, + 276, + 825, + 291 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "C.1 Training Hyperparameters 22", + "C.2 Original Model Details 22" + ], + "bbox": [ + 196, + 297, + 823, + 333 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "D Prompts 22", + "bbox": [ + 173, + 349, + 825, + 366 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "D.1 Prompts for Generating LLM Responses 22", + "D.2 Prompts for GPT-4o Annotation 23", + "D.3 Prompts for Data Augmentation 23", + "D.4 Prompts for Judge Model 23", + "D.5 Prompts for xVerify 25" + ], + "bbox": [ + 196, + 371, + 823, + 470 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "E Supplementary Experimental Results 25", + "bbox": [ + 173, + 487, + 825, + 503 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "E.1 Evaluation Accuracy Results of All xVerify Models 25", + "E.2 Computational Efficiency and Operational Cost of xVerify and Judge Models 26" + ], + "bbox": [ + 196, + 508, + 823, + 545 + ], + "page_idx": 12 + }, + { + "type": 
"page_number", + "text": "13", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A Datasets and Models", + "text_level": 1, + "bbox": [ + 174, + 89, + 382, + 106 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This section will present the relevant information for all the public datasets and LLMs involved in the experiments of this paper.", + "bbox": [ + 174, + 131, + 823, + 161 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this study, we employ a total of 24 datasets, which are categorized into four primary types: multiple-choice questions (Choice), short answer questions (Short Answer), mathematical problems (Math), and classification tasks (Classification), as summarized in Table 3. To evaluate the multilingual capabilities of the xVerify model, each question type includes datasets in both Chinese and English, with one dataset featuring multilingual content. For each dataset, samples are partitioned into training and test sets following a 2:1 ratio, with the training and test sets ideally comprising 2,000 and 1,000 instances, respectively. In certain cases, the number of available samples is below 3,000, or the official test set is not publicly available, resulting in reduced dataset sizes after preprocessing.", + "bbox": [ + 174, + 166, + 825, + 279 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/ace9afe870fb516fba4a36b235780757dc31427ac11e60fe7a7dd45ba0ad352f.jpg", + "table_caption": [ + "Table 3: Datasets Description. The \"Type\" column indicates the question type in the corresponding dataset, including multiple-choice questions (Choice), short answer questions (Short Answer), math questions (Math), and classification questions (Classification)." + ], + "table_footnote": [], + "table_body": "
DatasetType#Train#TestLanguageLicense
CMMLUChoice20001000ChineseCC-BY-NC-4.0
C-EvalChoice1346260ChineseCC-BY-NC-SA-4.0
GPQAChoice794398EnglishCC-BY-4.0
MMLUChoice18161000EnglishMIT
MMLU-ProChoice20001000EnglishMIT
MMLU-RutexChoice20001000EnglishCC-BY-4.0
AgNewsClassification20001000EnglishUnspecified
AmazonClassification20001000EnglishApache-2.0
CLUEWSCClassification15481000ChineseUnspecified
CMNLIClassification20001000ChineseApache-2.0
AMC23Math2614EnglishUnspecified
AIME 2024Math2010EnglishMIT
CMATHMath1128565ChineseCC-BY-4.0
GSM8KMath20001000EnglishMIT
LiveMathBenchMath19093English & ChineseCC-BY-4.0
MATHMath20001000EnglishMIT
MGSMMath1892946MultilingualCC-BY-SA-4.0
OlympiadBenchMath1787892English & ChineseApache-2.0
ARCShort Answer20001000EnglishCC-BY-SA-4.0
CHIDShort Answer20001000ChineseApache-2.0
C-SimpleQAShort Answer20001000ChineseCC-BY-NC-SA-4.0
DROPShort Answer20001000EnglishCC-BY-SA-4.0
FRAMESShort Answer550274EnglishApache-2.0
SimpleQAShort Answer20001000EnglishMIT
", + "bbox": [ + 174, + 358, + 823, + 726 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A total of 19 large language models (LLMs) are utilized in our experiments, encompassing a diverse range of model sizes and types, with a particular emphasis on reasoning models (see Table 4). These models are subsequently used to collect LLM-generated responses and to train the xVerify model.", + "bbox": [ + 174, + 757, + 823, + 799 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B VAR Dataset Details", + "text_level": 1, + "bbox": [ + 174, + 838, + 377, + 854 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This section will present detailed information about the components of the VAR dataset, the details of human annotations, and examples from the dataset.", + "bbox": [ + 174, + 882, + 823, + 911 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 491, + 935, + 506, + 946 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/ab208174476212e4137c9a301bd0ee711411191248932f47f50bc7023c55d50b.jpg", + "table_caption": [ + "Table 4: LLMs Description. LLMs are listed by release date. All models are chat or instruct type. \"NaN\" indicates that public data is unavailable." + ], + "table_footnote": [], + "table_body": "
Model#Para.TypePublisherDate
ChatGLM3-6B6BChatTsinghua2023.10
GPT-4oNaNChatOpenAI2024.05
Gemma-2-2B-it2BInstructGoogle2024.06
Gemma-2-9B-it9BInstructGoogle2024.06
GLM-4-9B-Chat9BChatTsinghua2024.06
InternLM2.5-7B-Chat7BChatShLab2024.06
Qwen2-1.5B-Instruct1.5BInstructAlibaba2024.06
Qwen2-7B-Instruct7BInstructAlibaba2024.06
Llama-3.1-8B-Instruct8BInstructMeta2024.07
Llama-3.2-1B-Instruct1BInstructMeta2024.09
Llama-3.2-3B-Instruct3BInstructMeta2024.09
Qwen2.5-7B-Instruct7BInstructAlibaba2024.09
Qwen2.5-14B-Instruct14BInstructAlibaba2024.09
Phi-414BChatMicrosoft2024.11
DeepSeek-R1-Distill-Llama-8B8BDistillDeepSeek2025.01
DeepSeek-R1-Distill-Qwen-1.5B1.5BDistillDeepSeek2025.01
DeepSeek-R1-Distill-Qwen-7B7BDistillDeepSeek2025.01
DeepSeek-R1-Distill-Qwen-14B14BDistillDeepSeek2025.01
QwQ-32B32BInstructAlibaba2025.03
", + "bbox": [ + 238, + 123, + 758, + 416 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.1 Details of Training, Test, and Generalization Sets", + "text_level": 1, + "bbox": [ + 171, + 444, + 555, + 459 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.1.1 Training Set", + "text_level": 1, + "bbox": [ + 171, + 470, + 316, + 486 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The training set comprises 43,204 samples. Tables 5 to 8 provide the sample counts corresponding to each LLM, dataset, prompt template, and question type. Note that datasets with names containing \"_enh\" refer to the augmented multiple choice question datasets.", + "bbox": [ + 169, + 496, + 823, + 539 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/fd11d917852a50665391d27d111940fb6ecebf798a934d5a7eff7d7d0412bf43.jpg", + "table_caption": [ + "Table 5: Number of samples from each LLM in the training set." + ], + "table_footnote": [], + "table_body": "
ModelSample Counts
ChatGLM3-6B2588
GPT-4o2691
Gemma-2-2B-it2657
Gemma-2-9B-it2600
GLM-4-9B-Chat2957
InternLM2.5-7B-Chat2935
Qwen2-1.5B-Instruct2700
Qwen2-7B-Instruct2898
LLaMA-3.1-8B-Instruct2852
Qwen2.5-7B-Instruct2854
Qwen2.5-14B-Instruct2801
DeepSeek-R1-Distill-Llama-8B3223
DeepSeek-R1-Distill-Qwen-1.5B3231
DeepSeek-R1-Distill-Qwen-7B3075
DeepSeek-R1-Distill-Qwen-14B3142
", + "bbox": [ + 323, + 575, + 671, + 814 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.1.2 Test Set", + "text_level": 1, + "bbox": [ + 171, + 844, + 284, + 858 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The test set comprises 6,122 samples. Tables 9 to 12 provide the sample counts corresponding to each LLM, dataset, prompt template, and question type. Note that datasets with names containing \"_enh\" refer to the augmented multiple choice question datasets.", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/73a9453381a215760714368ac5cb85025f3e7bd128dfccae5838a59ffcaadddf.jpg", + "table_caption": [ + "Table 6: Number of samples from each dataset in the training set." + ], + "table_footnote": [], + "table_body": "
DatasetSample Counts
CMMLU1557
CMMLU_enh1641
GPQA1587
GPQA_enh1668
MMLU1520
MMLU_enh1513
MMLU-Pro1394
MMLU-Pro_enh1442
AgNews1751
CLUEWSC5008
AMC231625
AIME 20241333
CMATH1893
GSM8K1836
MATH2485
MGSM1384
OlympiadBench_en2573
OlympiadBench_zh2709
CHID2424
C-SimpleQA1913
DROP1928
FRAMES2020
", + "bbox": [ + 367, + 138, + 630, + 474 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/c2fad44e3447682f07ed7b3b720c84c552b7a4e0bd7f0a0ad060aca491fe686e.jpg", + "table_caption": [ + "Table 7: Number of samples from each prompt template in the training set." + ], + "table_footnote": [], + "table_body": "
Prompt TemplateSample Counts
0-shot4884
0-shot-restrict5977
0-shot-cot4907
0-shot-cot-restrict6041
5-shot4774
5-shot-restrict5866
5-shot-cot4916
5-shot-cot-restrict5839
", + "bbox": [ + 370, + 561, + 624, + 705 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/7f8043c5633395c2adfc55d0b319e11b7f5bbfb48b2a9d7af5961c8ed362baa1.jpg", + "table_caption": [ + "Table 8: Number of samples from each question type in the training set." + ], + "table_footnote": [], + "table_body": "
DatasetSample Counts
Multiple Choice12322
Math15838
Short Answer8285
Classification6759
", + "bbox": [ + 380, + 792, + 616, + 878 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/3dac7cbc26dfe8f1ceb6cbaed76906c9a6301581c993f71b06d82a52f269f72d.jpg", + "table_caption": [ + "Table 9: Number of samples from each LLM in the test set." + ], + "table_footnote": [], + "table_body": "
ModelSample Counts
ChatGLM3-6B378
GPT-4o400
Gemma-2-2B-it416
Gemma-2-9B-it369
GLM-4-9B-Chat367
InternLM2.5-7B-Chat367
Qwen2-1.5B-Instruct433
Qwen2-7B-Instruct427
LLaMA-3.1-8B-Instruct404
Qwen2.5-7B-Instruct374
Qwen2.5-14B-Instruct415
DeepSeek-R1-Distill-Llama-8B430
DeepSeek-R1-Distill-Qwen-1.5B451
DeepSeek-R1-Distill-Qwen-7B439
DeepSeek-R1-Distill-Qwen-14B452
", + "bbox": [ + 323, + 125, + 674, + 361 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/0629df9d0b19bee4203560dc4a695c83d0ddc6503641996064d63f483195b57e.jpg", + "table_caption": [ + "Table 10: Number of samples from each dataset in the test set." + ], + "table_footnote": [], + "table_body": "
DatasetSample Counts
CMMLU216
CMMLU_enh195
GPQA207
GPQA_enh235
MMLU225
MMLU_enh222
MMLU-Pro171
MMLU-Pro_enh192
AgNews261
CLUEWSC710
AMC23258
AIME 2024186
CMATH263
GSM8K262
MATH362
MGSM205
OlympiadBench_en349
OlympiadBench_zh446
CHID347
C-SimpleQA270
DROP265
FRAMES275
", + "bbox": [ + 367, + 417, + 630, + 752 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/c517511348593103d11d7b4b5735a2b47eb6ee73d4973baed5ac68d9f8ac00a3.jpg", + "table_caption": [ + "Table 11: Number of samples from each prompt template in the test set." + ], + "table_footnote": [], + "table_body": "
DatasetSample Counts
Multiple Choice1663
Math2331
Short Answer1157
Classification971
", + "bbox": [ + 380, + 809, + 617, + 895 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/b1acc2ec410f8786487d62af1a5b00c560cbb70f19fd5ef0c2cc2a8b235279f6.jpg", + "table_caption": [ + "Table 12: Number of samples from each question type in the test set." + ], + "table_footnote": [], + "table_body": "
Prompt TemplateSample Counts
0-shot680
0-shot-restrict798
0-shot-cot642
0-shot-cot-restrict891
5-shot690
5-shot-restrict789
5-shot-cot702
5-shot-cot-restrict930
", + "bbox": [ + 370, + 109, + 624, + 252 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "B.1.3 Generalization Set", + "text_level": 1, + "bbox": [ + 171, + 279, + 357, + 292 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The generalization set comprises 6,468 samples. Tables 13 to 16 provide the sample counts corresponding to each LLM, dataset, prompt template, and question type. Note that datasets with names containing \"_enh\" refer to the augmented multiple choice question datasets.", + "bbox": [ + 169, + 305, + 826, + 349 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/f5727c2e469bab12fce1b06ec5d22b6afe087d69e01d8fc5aaf17bfaa9fdf2fe.jpg", + "table_caption": [ + "Table 13: Number of samples from each LLM in the generalization set." + ], + "table_footnote": [], + "table_body": "
ModelSample Counts
ChatGLM3-6B300
GPT-4o305
Gemma-2-2B-it427
Gemma-2-9B-it296
GLM-4-9B-Chat339
InternLM2.5-7B-Chat341
Qwen2-1.5B-Instruct280
Qwen2-7B-Instruct346
LLaMA-3.1-8B-Instruct400
LLaMA-3.2-1B-Instruct314
LLaMA-3.2-3B-Instruct310
Qwen2.5-7B-Instruct326
Qwen2.5-14B-Instruct334
Phi-4314
DeepSeek-R1-Distill-Llama-8B341
DeepSeek-R1-Distill-Qwen-1.5B399
DeepSeek-R1-Distill-Qwen-7B375
DeepSeek-R1-Distill-Qwen-14B434
QwQ-32B287
", + "bbox": [ + 323, + 385, + 674, + 680 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/e9b4dd3cde946e173686828b3110179c284836bb8a3a8b4e6ad063eb9ec3c465.jpg", + "table_caption": [ + "Table 14: Number of samples from each dataset in the generalization set." + ], + "table_footnote": [], + "table_body": "
DatasetSample Counts
C-Eval435
C-Eval_enh442
MMLU-Redux436
MMLU-Redux_enh483
Amazon646
CMNLI643
LiveMathBench_en1127
LiveMathBench_zh821
ARC807
SimpleQA628
", + "bbox": [ + 367, + 727, + 630, + 897 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/0a73b1232815b5f11a0943458d3e6c251b60e51f233064cae9b36a3e559a35f7.jpg", + "table_caption": [ + "Table 15: Number of samples from each prompt template in the generalization set." + ], + "table_footnote": [], + "table_body": "
DatasetSample Counts
Multiple Choice1796
Math1948
Short Answer1435
Classification1289
", + "bbox": [ + 379, + 109, + 617, + 196 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/f0f39aa49410fc0e741f63585bbd3a48e8a9c8fdfc5d987e2b79d4ffef1da1bd.jpg", + "table_caption": [ + "Table 16: Number of samples from each question type in the generalization set." + ], + "table_footnote": [], + "table_body": "
Prompt TemplateSample Counts
0-shot703
0-shot-restrict856
0-shot-cot772
0-shot-cot-restrict915
5-shot690
5-shot-restrict885
5-shot-cot756
5-shot-cot-restrict891
", + "bbox": [ + 372, + 229, + 624, + 371 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "B.2 Details of Human Annotation", + "text_level": 1, + "bbox": [ + 171, + 396, + 421, + 410 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "To ensure high-quality annotation for the VAR dataset, we assembled a team of 8 annotators. Among them, 6 hold bachelor's degrees and are primarily responsible for batch annotation tasks, while the other 2 hold master's degrees and focus on reviewing complex cases or resolving discrepancies in annotations made by multiple annotators. The gender ratio within the annotation team is balanced at 1:1. In terms of compensation, all annotators were paid according to the local industry average rates. The annotation process lasted for three weeks, covering a total of 15 working days.", + "bbox": [ + 169, + 421, + 826, + 506 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/b0c4cb4845ea9f03c0724fd183e74ebd191cb72e12c445bf83a4897206519880.jpg", + "image_caption": [ + "Figure 3: Illustration of the Label Studio Interface." + ], + "image_footnote": [], + "bbox": [ + 173, + 517, + 823, + 777 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The detailed annotation guidelines are presented below. Figure 3 shows an example of the interface used in our annotation tool. Each sample to be annotated contains four fields: question, LLM output, correct answer, and answer range. The question type includes four categories: multiple choice, math, short answer, and classification. Annotators are required to judge whether the LLM output matches the correct answer based on the question, while the answer range serves as auxiliary reference information to support the decision-making process. 
The specific annotation instructions and criteria are as follows:", + "bbox": [ + 169, + 814, + 826, + 912 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Answer evaluation criteria for different question types:", + "text_level": 1, + "bbox": [ + 171, + 90, + 558, + 107 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "- Multiple Choice", + "text_level": 1, + "bbox": [ + 215, + 118, + 346, + 131 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "For multiple-choice questions, answer options may be labeled with letters (A, B, C, D, ...) Roman numerals (I, II, III, IV, ...), or Arabic numerals (1, 2, 3, 4, ...). The LLM output is considered correct if it provides:", + "bbox": [ + 225, + 132, + 826, + 174 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Only the correct option label;", + "- Only the correct option content;", + "- Both the correct label and content." + ], + "bbox": [ + 245, + 178, + 491, + 224 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In cases where the label and content are inconsistent, the content takes precedence. If the content is correct, the answer is marked as correct; if the content is incorrect, the answer is marked as incorrect, even if the option label is correct (see the final annotation example for reference).", + "bbox": [ + 227, + 229, + 823, + 285 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "- Short Answer", + "text_level": 1, + "bbox": [ + 215, + 290, + 331, + 301 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Short-answer questions may require responses such as names, locations, numbers, dates, or full sentences. 
The evaluation criteria are:", + "bbox": [ + 227, + 304, + 823, + 330 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- For concise answers (e.g., names, places, dates), strict string matching is required.", + "- For sentence-level answers, semantic consistency with the reference answer is required.", + "- For numerical answers, mathematical equivalence must be verified (e.g., \"12000\" and \"12,000\" are considered equivalent)." + ], + "bbox": [ + 243, + 335, + 825, + 397 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "- Classification", + "text_level": 1, + "bbox": [ + 215, + 401, + 328, + 414 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Classification questions come with a fixed set of candidate answers. The LLM output must explicitly and exactly match the correct answer in this set to be judged as correct.", + "bbox": [ + 227, + 415, + 823, + 444 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Math", + "text_level": 1, + "bbox": [ + 215, + 448, + 272, + 459 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "For mathematical questions, the final answer in the LLM output must be mathematically equivalent to the reference answer. Evaluation criteria include:", + "bbox": [ + 227, + 460, + 823, + 489 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- If an initial answer (ans1) is given but followed by a derived final answer (ans2) through calculation, ans2 should be used for evaluation.", + "- If the LLM output or ground-truth answer is provided in LaTeX format and cannot be visually interpreted, a LaTeX compiler should be used to determine equivalence." 
+ ], + "bbox": [ + 243, + 494, + 823, + 551 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Special cases:", + "text_level": 1, + "bbox": [ + 171, + 565, + 271, + 579 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Overly Long Responses", + "text_level": 1, + "bbox": [ + 215, + 592, + 398, + 606 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "If the LLM output is excessively long, use the final answer provided as the basis for judgment. If the response does not converge to a clear answer (e.g., repeated changes or ambiguity), it should be marked as incorrect.", + "bbox": [ + 227, + 606, + 826, + 647 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "- Truncated Calculations", + "text_level": 1, + "bbox": [ + 215, + 652, + 398, + 665 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In long responses where the final verification or calculation is truncated, it can be ignored. If a clear answer was provided earlier, use it for evaluation; if not, mark the response as incorrect (see the second-to-last annotation example).", + "bbox": [ + 227, + 666, + 825, + 708 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Unfamiliar Domains", + "text_level": 1, + "bbox": [ + 215, + 712, + 377, + 724 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "If the correctness of the LLM response cannot be determined due to unfamiliar domain knowledge (e.g., mathematical expressions whose equivalence is hard to judge), the case may be skipped and will later be annotated by more qualified annotators.", + "bbox": [ + 227, + 726, + 823, + 768 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "- Irrelevant but Matched Answers", + "text_level": 1, + "bbox": [ + 215, + 772, + 462, + 785 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "If the LLM output is irrelevant to the question but coincidentally contains the correct final answer, it should be marked as incorrect.", + "bbox": [ + 227, + 786, + 
823, + 814 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "B.3 Examples from the VAR Dataset", + "text_level": 1, + "bbox": [ + 171, + 90, + 442, + 106 + ], + "page_idx": 20 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "{ \"dataset\": \"SimpleQA\", \"question\": \"In which year did Fayaz A. Malik (an Indian pharmacologist, cancer biologist, and scientist) receive the Young Scientist of the Year from the Council of Scientific and Industrial Research?\", \"question_type\": \"short answer\", \"correct_answer\": \"2009\", \"llm_output\": \"The year Fayaz A. Malik received the Young Scientist of the Year award from the Council of Scientific and Industrial Research was 2001\\. \\textbackslash textbackslash n\\textbackslash textbackslash nThe answer is 2001.\", \"humanjudge_result\": \"Incorrect\" } { \"dataset\": \"MMLU-Redux_enh\", \"question\": \"If you wanted to find the global distribution of coal , you would use a Answer Choices: (I) reference map. (II) topographic map. (III) thematic map. (IV) location map.\", \"question_type\": \"multiple choice\", \"correct_answer\": \"III\", \"llm_output\": \"The answer is (III) thematic map.\", \"humanjudge_result\": \"Correct\" } { \"dataset\": \"Amazon\", \"question\": \"Lightsaber Action!!: This game is absolutely the best game I have ever seen! there is a little bit of violence, but most of the time you kill droids, not humans. My favorite weapon is the lightsaber, because you can deflect laser blasts\\. 
\\textbackslash textbackslash n Please identify the sentiment polarity of the sentence: positive or negative\", \"question_type\": \"classification\", \"correct_answer\": \"positive\", \"llm_output\": \"The answer is positive.\", \"humanjudge_result\": \"Correct\" } { \"dataset\": \"GSM8K\", \"question\": \"Daisy and Rose were enjoying their backyard pool with their dogs. If there are 24 legs/paws in the pool, how many dogs do Daisy and Rose have?\", \"question_type\": \"math\", \"correct_answer\": \"5\", \"llm_output\": \"To find the number of dogs Daisy and Rose have, we need to first determine the total number of people and then subtract that from the total number of legs/paws\\.n\\sSince each person has 2 legs, we can divide the total number of legs/paws by 2 to find the number of people\\.n24 legs/paws / 2 legs/person = 12 people\\n\\sSince Daisy and Rose are the only people in the pool, we can subtract them from the total number of people to find the number of dogs\\.n12 people - 2 people = 10 dogs\\n\\nTherefore, Daisy and Rose have 10 dogs\\.\". \"humanjudge_result\": \"Incorrect\" }", + "guess_lang": "jsonl", + "bbox": [ + 169, + 118, + 831, + 820 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C Model Training Details", + "text_level": 1, + "bbox": [ + 171, + 89, + 405, + 107 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "This section will further present additional information about the training of the xVerify model.", + "bbox": [ + 169, + 119, + 795, + 137 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "C.1 Training Hyperparameters", + "text_level": 1, + "bbox": [ + 171, + 151, + 405, + 167 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The xVerify model is trained using the QLoRA method, with consistent hyperparameter settings across all base models. 
The training is carried out on multiple GPU servers. Table 17 presents the key training hyperparameters.", + "bbox": [ + 169, + 176, + 823, + 219 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/896e2b96f77717b8e0c38a400cdd6dd74ebd302e370ea750aa771547dddedf3e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
HyperparameterSetting
Per Device Train Batch Size1
Gradient Accumulation Steps8
Learning Rate1.0e-4
Num Train Epochs1.0
LrScheduler Typecosine
Warmup Ratio0.1
Bf16true
Ddp Timeout180000000
Lora Rank8
", + "bbox": [ + 318, + 252, + 655, + 409 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Table 17: Hyperparameter settings for model training.", + "bbox": [ + 318, + 229, + 676, + 246 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "C.2 Original Model Details", + "text_level": 1, + "bbox": [ + 171, + 431, + 375, + 446 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "This paper uses 14 original models of different parameter scales and types for training on the VAR dataset. Table 18 presents the relevant information for all xVerify models and their corresponding original models.", + "bbox": [ + 169, + 457, + 823, + 500 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/66b6457509766ada8a1bb0ea03cd9f4f8d43c958b3a107ea849da0475400826c.jpg", + "table_caption": [ + "Table 18: Details of Original Models and Corresponding xVerify Models. Sorted by Original Model Name." + ], + "table_footnote": [], + "table_body": "
Original Model#Para.TypeContext LengthxVerify Model
Gemma-2-2B-it2BInstruct8KxVerify-2B-I
Gemma-2-9B-it9BInstruct8KxVerify-9B-I
Gemma-2-27B-it27BInstruct8KxVerify-27B-I
GLM-4-9B-Chat9BChat128KxVerify-9B-C
Llama-3.2-1B-Instruct1BInstruct128KxVerify-1B-I
Llama-3.2-3B-Instruct3BInstruct128KxVerify-3B-Ia
Llama-3.1-8B-Instruct8BInstruct128KxVerify-8B-I
Phi-414BInstruct16kxVerify-14B-Ib
Qwen2.5-0.5B-Instruct0.5BInstruct128KxVerify-0.5B-I
Qwen2.5-1.5B-Instruct1.5BInstruct128KxVerify-1.5B-I
Qwen2.5-3B-Instruct3BInstruct128KxVerify-3B-Ib
Qwen2.5-7B-Instruct7BInstruct128KxVerify-7B-I
Qwen2.5-14B-Instruct14BInstruct128KxVerify-14B-Ia
Qwen2.5-32B-Instruct32BInstruct128KxVerify-32B-I
", + "bbox": [ + 227, + 545, + 769, + 768 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "D Prompts", + "text_level": 1, + "bbox": [ + 171, + 795, + 282, + 813 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "This section will present all the prompt templates used in the experiments of this paper.", + "bbox": [ + 169, + 825, + 743, + 843 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "D.1 Prompts for Generating LLM Responses", + "text_level": 1, + "bbox": [ + 171, + 857, + 501, + 873 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The prompt templates used to generate LLM responses are illustrated in Figures 4 to 7. Each template consists of four fields that need to be populated: \"task_type\", \"task_description\", \"examples\", and", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "\"question\". The \"task_type\" and \"task_description\" fields are determined based on the type of question. For instance, for questions from the GPQA dataset, \"task_type\" is set to \"multidisciplinary question\", and \"task_description\" is set to \"Please choose the answer from options A to D, corresponding to the question.\" During dataset preprocessing, we design appropriate \"task_type\" and \"task_description\" values for each dataset. The \"examples\" field is filled according to the selected prompting strategy, either 0-shot or 5-shot. In the 0-shot setting, this field is left empty, while in the 5-shot setting, it is populated with five example question-answer pairs that are similar to the target \"question\". The \"question\" field contains the specific query to be answered by the LLM. 
Examples of the \"examples\" and \"question\" fields are shown in Figures 8 and 9, respectively.", + "bbox": [ + 169, + 90, + 826, + 217 + ], + "page_idx": 22 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "You are an expert in {task_type}, {task_description} \n{examples} \n{question}", + "guess_lang": "txt", + "bbox": [ + 196, + 239, + 509, + 280 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Figure 4: Few-shot prompt for generating LLM responses.", + "bbox": [ + 303, + 305, + 691, + 321 + ], + "page_idx": 22 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "You are an expert in {task_type}, {task_description} \n{examples} \n{question} \nEnd your final answer with 'The answer is .", + "guess_lang": "txt", + "bbox": [ + 196, + 352, + 517, + 417 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Figure 5: Few-shot-restrict prompt for generating LLM responses.", + "bbox": [ + 279, + 441, + 717, + 458 + ], + "page_idx": 22 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "You are an expert in {task_type}, {task_description} \n{examples} \n{question} \nLet's think step by step.", + "guess_lang": "txt", + "bbox": [ + 196, + 489, + 509, + 555 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Figure 6: Few-shot-cot prompt for generating LLM responses.", + "bbox": [ + 290, + 580, + 702, + 595 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "D.2 Prompts for GPT-4o Annotation", + "text_level": 1, + "bbox": [ + 171, + 619, + 441, + 633 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "The prompt templates used for annotating the collected LLM question-answer pairs with GPT-4o during the construction of the VAR dataset are shown in Figures 10 and 11. 
Both of these prompt templates employ the Chain-of-Thought (CoT) strategy to ensure the accuracy of the annotations generated by GPT-4o.", + "bbox": [ + 169, + 645, + 823, + 702 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "D.3 Prompts for Data Augmentation", + "text_level": 1, + "bbox": [ + 171, + 718, + 441, + 733 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In constructing the VAR dataset, two prompt templates used to guide GPT-4o in augmenting mathematical question samples are presented in Figures 12 and 13.", + "bbox": [ + 169, + 743, + 826, + 772 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "D.4 Prompts for Judge Model", + "text_level": 1, + "bbox": [ + 171, + 787, + 395, + 803 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In the experiments of this paper, the prompts used for all judge models were constructed based on the official templates provided by their respective developers. However, for some judge models, the official prompt templates were not fully compatible with the evaluation tasks in this paper, so other similar prompt templates were used. 
Specifically, Figure 14 shows the prompt template used by GPT-4o as Judge, Figure 15 shows the prompt template used by GPT-4o as Judge (CoT), Figure 16 shows the prompt template used by JudgeLM series models and PandaLM-7B-v1, Figure 17 shows the prompt template used by Auto-J series models, and Figure 18 shows the prompt template used", + "bbox": [ + 169, + 814, + 823, + 912 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "You are an expert in {task_type}, {task_description} \n{examples} \n{question}", + "bbox": [ + 196, + 131, + 509, + 171 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Let's think step by step.", + "bbox": [ + 197, + 181, + 339, + 196 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "End your final answer with 'The answer is .", + "bbox": [ + 197, + 207, + 517, + 220 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Figure 7: Few-shot-cot-restrict prompt for generating LLM responses.", + "bbox": [ + 266, + 246, + 728, + 261 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "***** Start In-Context Examples ****", + "text_level": 1, + "bbox": [ + 197, + 344, + 436, + 357 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Q: A late game rally by Washington led them to the Eagles' 26 yard line. A shot to the end zone by Robert Griffin III would be intercepted by Brandon Boykin, clinching an Eagles win. The Eagles would move to 6-5. This is the Eagles first win at Lincoln Financial Field since Week 4 of the 2012 season, because prior to this game, the Eagles had never won a game in their home stadium in 414 days since that same week, snapping a 10-game losing streak at home with this win. 
How many more wins than losses did the Eagles have after this game?", + "bbox": [ + 197, + 358, + 799, + 433 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "A: The answer is 1.", + "bbox": [ + 197, + 434, + 315, + 445 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Q: The population of Sevastopol proper is 418,987 (01.01.16), making it the largest in the Crimean Peninsula. The city's agglomeration has about 600,000 people (2015). According to the Ukrainian Census (2001), the ethnic groups of Sevastopol include Russians (71.6%), Ukrainians (22.4%), Belarusians (1.6%), Tatars (0.7%), Crimean Tatars (0.5%), Armenians (0.3%), Jews (0.3%), Moldovans (0.2%), and Azerbaijani people (0.2%). Which ethnic has a higher percentage of the population in Sevastopol: Russians or Armenians?", + "bbox": [ + 197, + 458, + 799, + 534 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "A: The answer is Russians.", + "bbox": [ + 197, + 535, + 359, + 546 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Q: the most common crimes in the ACT are property related crimes, unlawful entry with intent and motor vehicle theft. They affected 2,304 and 966 people (580 and 243 per 100,000 persons respectively). Homicide and related offences—murder, attempted murder and manslaughter, but excluding driving causing death and conspiracy to murder—affect 1.0 per 100,000 persons, which is below the national average of 1.9 per 100,000. Rates of sexual assault (64.4 per 100,000 persons) are also below the national average (98.5 per 100,000). 
Which was there a higher national average for, homicide and related offences or sexual assault?", + "bbox": [ + 197, + 559, + 799, + 646 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "A: The answer is sexual assault.", + "bbox": [ + 197, + 648, + 388, + 659 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Q: In the county, the population was spread out with $21.7\\%$ under the age of 18, $8.5\\%$ from 18 to 24, $26.9\\%$ from 25 to 44, $27.7\\%$ from 45 to 64, and $15.0\\%$ who were 65 years of age or older. The median age was 40 years. For every 100 females, there were 94.4 males. For every 100 females age 18 and over, there were 98.7 males. How many percent were not from 45 to 64?", + "bbox": [ + 197, + 672, + 799, + 722 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "A: The answer is 72.3.", + "bbox": [ + 197, + 723, + 333, + 734 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Q: The median age in the city was 35.1 years. $24.2\\%$ of residents were under the age of 18; $7.9\\%$ were between the ages of 18 and 24; $33.8\\%$ were from 25 to 44; $24.6\\%$ were from 45 to 64; and $9.5\\%$ were 65 years of age or older. The gender makeup of the city was $48.6\\%$ male and $51.4\\%$ females. 
How many more people, in terms of percentage, were in the largest age group compared to the second smallest?", + "bbox": [ + 197, + 747, + 799, + 810 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "A: The answer is 24.3.", + "bbox": [ + 197, + 811, + 333, + 821 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "***** End In-Context Examples ****", + "bbox": [ + 197, + 823, + 431, + 835 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Figure 8: Example of \"examples\" fields.", + "bbox": [ + 364, + 862, + 630, + 877 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Q: Let $ABCD$ be a tetrahedron such that $AB = CD = \\sqrt{41}$ , $AC = BD = \\sqrt{80}$ , and $BC = AD = \\sqrt{89}$ . There exists a point $I$ inside the tetrahedron such that the distances from $I$ to each of the faces of the tetrahedron are all equal. This distance can be written in the form $\\frac{m\\sqrt{n}}{p}$ , where $m, n$ , and $p$ are positive integers, $m$ and $p$ are relatively prime, and $n$ is not divisible by the square of any prime. Find $m + n + p$ .", + "bbox": [ + 196, + 99, + 799, + 170 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "A:", + "bbox": [ + 197, + 171, + 215, + 181 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Figure 9: Example of \"question\" fields.", + "bbox": [ + 367, + 205, + 627, + 222 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. Think step by step as you make your evaluation.", + "bbox": [ + 196, + 247, + 799, + 275 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. 
Think step by step and respond with either [Correct] or [Incorrect].", + "bbox": [ + 194, + 286, + 799, + 325 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Special considerations:", + "text_level": 1, + "bbox": [ + 197, + 335, + 336, + 348 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect].", + "2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct].", + "3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content.", + "4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]." + ], + "bbox": [ + 197, + 349, + 799, + 463 + ], + "page_idx": 24 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Please present your response in the following JSON format: { \"reasoning\": \"Your step-by-step reasoning here.\", \"judgment\": \"Correct or Incorrect\" } Question: \"\"{question}\"\" Output sentence: \"\"{output}\"\" Correct answer: {answer}", + "guess_lang": "txt", + "bbox": [ + 197, + 464, + 552, + 579 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Figure 10: Prompt I for GPT-4o annotation.", + "bbox": [ + 352, + 612, + 643, + 628 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "by Prometheus series models. 
The official prompt template for the CompassJudger-1 series models corresponds to pairwise evaluation, so the prompt template used by this series is the same as that for the xVerify model, as shown in Figure 19.", + "bbox": [ + 169, + 656, + 826, + 699 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "D.5 Prompts for xVerify", + "text_level": 1, + "bbox": [ + 171, + 717, + 356, + 733 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Figure 19 shows the prompt template used to construct the input for the xVerify model. This template is used both for training and evaluation of the xVerify model. Specifically, \"question,\" \"output,\" and \"answer\" correspond to the question content, the LLM response, and the reference answer, respectively.", + "bbox": [ + 169, + 744, + 826, + 801 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "E Supplementary Experimental Results", + "text_level": 1, + "bbox": [ + 171, + 823, + 522, + 840 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "E.1 Evaluation Accuracy Results of All xVerify Models", + "text_level": 1, + "bbox": [ + 171, + 856, + 570, + 871 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Tables 19 and 20 present the performance of all $14\\mathrm{x}$ Verify models on the test set and the generalization set, respectively. Overall, each xVerify model achieves an F1 score and accuracy exceeding $96.5\\%$", + "bbox": [ + 169, + 883, + 825, + 912 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. Think step by step as you make your evaluation.", + "bbox": [ + 196, + 99, + 799, + 126 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We request your feedback on whether the model's response correctly answers the user question above. 
Follow these steps to make your evaluation:", + "bbox": [ + 194, + 137, + 799, + 162 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Think step by step: Read the user question carefully.", + "2. Think step by step: Review the reference answer and understand the key points it covers.", + "3. Think step by step: Compare the model's answer with the reference answer.", + "4. Think step by step: Determine if the model's answer addresses the key points in the reference answer and correctly answers the question." + ], + "bbox": [ + 197, + 162, + 797, + 226 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "First, provide your reasoning in detail. Then, clearly state your judgment as either \"Correct\" or \"Incorrect.\"", + "bbox": [ + 194, + 237, + 799, + 263 + ], + "page_idx": 25 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Please present your response in the following JSON format: \n{ \"reasoning\": \"Your step-by-step reasoning here.\", \"judgment\": \"Correct or Incorrect\" \n} \nQuestion: {question} \nReference Answer: {answer} \nModel's Answer: {output}", + "guess_lang": "txt", + "bbox": [ + 196, + 263, + 552, + 378 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Figure 11: Prompt II for GPT-4o annotation.", + "bbox": [ + 349, + 402, + 643, + 417 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "on the test set and over $95.52\\%$ on the generalization set. These results demonstrate not only the effectiveness of the xVerify models for evaluation tasks but also the high quality of the VAR dataset.", + "bbox": [ + 169, + 446, + 823, + 474 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "A comparison between the results on the two datasets shows that the performance on the generalization set experiences a slight decline relative to the test set, with the decrease not exceeding $1.6\\%$ . 
Moreover, models with larger parameter sizes exhibit smaller performance drops. This indicates that the xVerify models possess strong generalization capabilities, which further improve with an increase in parameter scale. Additionally, it is observed across both datasets that while the performance of xVerify models generally enhances with the increment of parameter size, beyond a certain threshold, further increases in parameter scale do not lead to additional performance gains.", + "bbox": [ + 169, + 479, + 823, + 579 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "images/af8a758a9ce34e27bf2d1dc172a720e7d29f1449eb7ef7443c3396698c925eb8.jpg", + "table_caption": [ + "Table 19: Evaluation Accuracy Results on the Test Set: All xVerify Models. The best performance in each column is shown in **bold**, and the second-best performance is underlined." + ], + "table_footnote": [], + "table_body": "
xVerify ModelMultiple ChoiceMathShort AnswerClassificationTotal
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
xVerify-0.5B-I97.78%97.90%93.74%94.64%96.72%97.49%99.71%99.59%96.69%96.85%
xVerify-1B-I97.22%97.35%94.76%95.45%96.06%96.97%99.71%99.59%96.77%96.91%
xVerify-1.5B-I97.85%97.96%95.10%95.75%96.05%96.97%99.63%99.49%97.05%97.17%
xVerify-2B-I97.93%98.02%95.06%95.71%96.06%96.97%99.78%99.69%97.09%97.21%
xVerify-3B-Ia97.73%97.84%95.00%95.67%96.17%97.06%99.71%99.59%97.02%97.14%
xVerify-3B-Ib97.31%97.41%95.65%96.18%96.38%97.23%99.78%99.69%97.17%97.27%
xVerify-7B-I97.75%97.84%95.94%96.44%96.51%97.32%99.78%99.69%97.41%97.50%
xVerify-8B-I97.92%98.02%95.34%95.97%96.05%96.97%99.71%99.59%97.17%97.29%
xVerify-9B-C98.29%98.38%95.26%95.88%96.06%96.97%99.78%99.69%97.25%97.37%
xVerify-9B-I97.43%97.53%95.75%96.27%96.06%96.97%99.78%99.69%97.19%97.29%
xVerify-14B-Ia97.49%97.59%95.73%96.22%95.41%96.46%99.63%99.49%97.06%97.16%
xVerify-14B-Ib97.67%97.78%96.10%96.57%95.74%96.72%99.71%99.59%97.31%97.40%
xVerify-27B-I97.81%97.90%95.46%96.01%96.19%97.06%99.56%99.38%97.15%97.26%
xVerify-32B-I97.81%97.90%95.88%96.31%96.18%97.06%99.71%99.59%97.32%97.40%
", + "bbox": [ + 174, + 628, + 821, + 813 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "E.2 Computational Efficiency and Operational Cost of xVerify and Judge Models", + "text_level": 1, + "bbox": [ + 169, + 842, + 751, + 858 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Table 21 displays the running time performance of the xVerify model and other judge models. Each model was evaluated using 200 randomly selected samples per question type from the generalization set, with running times measured in seconds. This data provides insights into the computational", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "You are an expert in mathematical calculations and data expressions. You are required to provide different equivalent forms of the standard answer for the following math problem.", + "bbox": [ + 197, + 214, + 795, + 252 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Problem: {question}", + "bbox": [ + 197, + 253, + 346, + 265 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Answer: {answer}", + "bbox": [ + 197, + 265, + 323, + 277 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Example 1:", + "bbox": [ + 197, + 290, + 276, + 301 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": " Problem: Let $ \\alpha \\beta \\gamma be the radian measure of the smallest angle in a $3-4-5$ right triangle. Let $ \\alpha \\beta \\gamma be the radian measure of the smallest angle in a $7-24-25$ right triangle. 
Express $ \\alpha \\beta \\gamma in terms of $ \\alpha \\beta \\gamma$.", + "bbox": [ + 197, + 303, + 800, + 340 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": " Answer: \\\\frac{\\backslashpi}{2} - 2\\alpha", + "bbox": [ + 197, + 340, + 455, + 353 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Output:", + "bbox": [ + 197, + 354, + 253, + 364 + ], + "page_idx": 26 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "\"\\\"json {\n \"answer1\": \"\\\"\\pi/2 - 2\\alpha\", \n \"answer2\": \"pi/2 - 2\\alpha\", \n \"answer3\": \"pi/2 - 2 * \\alpha\", \n \"answer4\": \"0.5 * \\pi - 2 * \\alpha\"\n}\");", + "guess_lang": "javascript", + "bbox": [ + 200, + 366, + 522, + 429 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Example 2:", + "bbox": [ + 197, + 441, + 276, + 453 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Problem: A volcano erupts and spews ash into the sky. The ash cloud spreads out in a diameter eighteen times as far as the distance it shot up into the sky. If the ashes erupted three hundred feet into the sky, what was the radius of the ash cloud in feet?", + "bbox": [ + 197, + 454, + 808, + 503 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Answer: 2700", + "bbox": [ + 197, + 505, + 292, + 515 + ], + "page_idx": 26 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Output:\n```\n\"\\\"json {\n \"answer1\": \"2.7×10^3\",\n \"answer2\": \"2700.0\",\n \"answer3\": \"2.7 \\times times 10^3\",\n \"answer4\": \"$2.7 \\times times 10^3$\",\n \"answer5\": \"Two thousand seven hundred\"}''", + "guess_lang": "txt", + "bbox": [ + 197, + 517, + 568, + 604 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Please note:", + "bbox": [ + 197, + 617, + 292, + 628 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "1. 
You need to provide 3 to 5 different standard forms of the answer", + "bbox": [ + 197, + 630, + 704, + 642 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "2. Each different form must be equivalent to the standard answer, i.e., it should still be a correct and valid answer.", + "bbox": [ + 197, + 643, + 769, + 666 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "3. You may use LaTeX, scientific notation, or other standard mathematical expressions.", + "bbox": [ + 197, + 667, + 761, + 691 + ], + "page_idx": 26 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "4. Please follow the JSON format below for the output:\n```\n\"\\\"json {\n \"answer1\": \"xxx\", \"answer2\": \"xxx\", \"answer3\": \"xxx\", ...\n}...\"", + "guess_lang": "txt", + "bbox": [ + 197, + 693, + 669, + 742 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Figure 12: Prompt for Generating Alternative Reference Answers.", + "bbox": [ + 279, + 779, + 715, + 794 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 26 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "You are an expert in mathematical calculations and data expressions. For an answer to a specific mathematical problem, you are required to provide equivalent and different expressions of the mathematical result. Answer: {output} \nExample 1: Answer: The answer is $\\beta = \\backslash$ frac{pi{2}-2\\alpha}. Output: \"'json { \"answer1\": \"The answer is $\\backslash$ pi/2 - 2\\alpha}. , \"answer2\": \"The answer is pi/2 - 2\\alpha}. , \"answer3\": \"The answer is pi/2 - 2\\* alpha.\", \"answer4\": \"The answer is $0.5*$ pi-2\\* alpha.\" }\"\" \nExample 2: Answer: The answer is 2700 feet. 
Output: \"'json { \"answer1\": \"The answer is $2.7\\times 10^{-3}$ feet.\", \"answer2\": \"The answer is 2700.0 feet.\", \"answer3\": \"The answer is 2.7 times $10^{-3}$ feet.\", \"answer4\": \"The answer is $\\$ 2.7$ times $10^{-3}\\{3\\}$ feet.\", \"answer5\": \"The answer is Two thousand seven hundred feet.\" }\"\" \nPlease note: 1. You need to provide 3 to 5 different expressions, each replacing the mathematical result with an equivalent and different form. 2. Each expression must be exactly equivalent to the target answer to ensure its correctness. 3. You can use LaTeX, scientific notation, or other standard mathematical formats. 4. Please output the result in the following JSON format: \"'json { \"answer1\": \"The answer is xxx\", \"answer2\": \"The answer is xxx\", \"answer3\": \"The answer is xxx\", \"answer4\": \"The answer is xxx\", \"answer5\": \"The answer is xxx\" }\"\"", + "guess_lang": "txt", + "bbox": [ + 169, + 214, + 826, + 762 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Figure 13: Prompt for Generating Diverse Final Answer Expressions.", + "bbox": [ + 267, + 772, + 725, + 789 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. Respond with either [Correct] or [Incorrect].", + "bbox": [ + 196, + 128, + 799, + 181 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Special considerations:", + "text_level": 1, + "bbox": [ + 197, + 191, + 336, + 204 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. 
**Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect].", + "2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct].", + "3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content.", + "4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]." + ], + "bbox": [ + 197, + 205, + 799, + 318 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Please present your response in the following JSON format:", + "bbox": [ + 197, + 319, + 552, + 330 + ], + "page_idx": 28 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "{\" judgement\": \"Correct or Incorrect\"}", + "guess_lang": "json", + "bbox": [ + 197, + 332, + 444, + 371 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Question: \"\"{question}\"\"", + "bbox": [ + 197, + 380, + 362, + 393 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Output sentence: \"\"{response}\"\"", + "bbox": [ + 197, + 393, + 405, + 406 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Correct answer: {reference}", + "bbox": [ + 197, + 407, + 367, + 419 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Figure 14: Prompt for GPT-4o as Judge.", + "bbox": [ + 364, + 444, + 630, + 460 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. Think step by step as you make your evaluation. 
You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. Think step by step and respond with either [Correct] or [Incorrect].", + "bbox": [ + 196, + 536, + 799, + 589 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Special considerations:", + "text_level": 1, + "bbox": [ + 197, + 599, + 336, + 612 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect].", + "2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct].", + "3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content.", + "4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]." 
+ ], + "bbox": [ + 197, + 613, + 799, + 726 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Please present your response in the following JSON format:", + "bbox": [ + 197, + 727, + 552, + 739 + ], + "page_idx": 28 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "\"reasoning\": \"Your step-by-step reasoning here.\", \"judgement\": \"Correct or Incorrect\"", + "guess_lang": "txt", + "bbox": [ + 197, + 739, + 522, + 792 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Question: \"\"{question}\"\"", + "bbox": [ + 197, + 800, + 362, + 814 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Output sentence: \"\"{response}\"", + "bbox": [ + 197, + 814, + 405, + 827 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Correct answer: {reference}", + "bbox": [ + 197, + 827, + 367, + 839 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Figure 15: Prompt for GPT-4o as Judge (CoT).", + "bbox": [ + 341, + 864, + 653, + 880 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 28 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "You are a helpful and precise assistant for checking the quality of the answer. \n[Question] \n{question} \n[Reference Answer] \n{reference} \n[Model's Answer] \n{response} \n[System] \nWe would like to request your feedback on the performance of the model's response to the user question displayed above. \nBased on the reference answer, please rate the accuracy of the response. The model receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance. \nPlease first output a single line containing only the score. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias. 
\n```java\n```java\nYou are a helpful and precise assistant for checking the quality of the answer. \n[Question] \n{question} \n[Reference Answer] \n{reference} \n[Model's Answer] \n{response} \n[System]", + "guess_lang": "txt", + "bbox": [ + 196, + 98, + 799, + 304 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Figure 16: Prompt for JudgeLM.", + "bbox": [ + 388, + 327, + 607, + 343 + ], + "page_idx": 29 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "[INST] Write critiques for a submitted response on a given user's query, incorporating the correct answer as a reference, and grade the response accordingly:", + "guess_lang": "txt", + "bbox": [ + 197, + 371, + 799, + 398 + ], + "page_idx": 29 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "[BEGIN DATA] \n\\*\\*\\* \n[Query]: {question} \n\\*\\*\\* \n[Correct Answer]: {reference} \n\\*\\*\\* \n[Response]: {response} \n\\*\\*\\* \n[END DATA] \nWrite critiques for this response. After that, you should give a final rating for the response on a scale of 1 to 10 by strictly following this format: \"[rating]\", for example: \"Rating: [[5]]\". [/INST]", + "guess_lang": "javascript", + "bbox": [ + 196, + 409, + 799, + 575 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Figure 17: Prompt for Auto-J.", + "bbox": [ + 397, + 598, + 598, + 614 + ], + "page_idx": 29 + }, + { + "type": "table", + "img_path": "images/fe6fa443c497f26a8ddbc810733dddf78eeec3fa5aadf72694d2815fa363a742.jpg", + "table_caption": [ + "Table 20: Evaluation Accuracy Results on the Generalization Set: All xVerify Models. The best performance in each column is shown in **bold**, and the second-best performance is **underlined**." + ], + "table_footnote": [], + "table_body": "
xVerify ModelMultiple ChoiceMathShort AnswerClassificationTotal
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
xVerify-0.5B-I96.49%96.10%80.00%91.94%96.95%97.00%99.03%98.53%95.29%95.53%
xVerify-1B-I96.10%95.66%82.45%92.51%97.32%97.35%98.92%98.37%95.43%95.62%
xVerify-1.5B-I96.76%96.38%83.58%93.12%97.46%97.49%98.88%98.29%95.85%96.03%
xVerify-2B-I96.27%95.82%82.11%92.51%97.60%97.63%98.98%98.45%95.57%95.75%
xVerify-3B-Ia96.44%95.99%86.10%94.25%97.31%97.35%99.03%98.53%96.11%96.27%
xVerify-3B-Ib96.21%95.71%86.20%94.15%97.60%97.63%99.03%98.53%96.08%96.23%
xVerify-7B-I96.16%95.66%87.86%94.87%97.45%97.49%98.93%98.37%96.22%96.37%
xVerify-8B-I96.67%96.27%86.76%94.61%97.45%97.49%99.03%98.53%96.33%96.49%
xVerify-9B-C97.00%96.66%87.08%94.71%97.45%97.49%98.98%98.45%96.45%96.61%
xVerify-9B-I96.06%95.55%87.47%94.76%97.53%97.56%99.13%98.68%96.23%96.38%
xVerify-14B-Ia96.11%95.60%90.20%95.74%97.32%97.35%99.13%98.68%96.53%96.65%
xVerify-14B-Ib96.35%95.88%87.88%94.92%97.45%97.49%98.93%98.37%96.30%96.44%
xVerify-27B-I96.01%95.49%85.64%93.99%97.32%97.35%99.13%98.68%95.93%96.09%
xVerify-32B-I96.22%95.71%90.09%95.59%97.32%97.35%99.03%98.53%96.50%96.60%
", + "bbox": [ + 176, + 667, + 821, + 852 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "efficiency of each model under uniform testing conditions, thereby facilitating a comparative analysis of their real-time processing capabilities and scalability in practical applications.", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "You are a fair judge assistant tasked with providing clear, objective feedback based on specific criteria, ensuring each assessment reflects the absolute standards set for performance.\"", + "bbox": [ + 197, + 99, + 799, + 126 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Task Description:", + "bbox": [ + 197, + 127, + 328, + 138 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "An instruction (might include an Input inside it), a response to evaluate, a reference answer that gets a score of 5, and a score rubric representing a evaluation criteria are given.", + "bbox": [ + 197, + 138, + 797, + 164 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Write a detailed feedback that assess the quality of the response strictly based on the given score rubric, not evaluating in general.", + "2. After writing a feedback, write a score that is an integer between 1 and 5. You should refer to the score rubric.", + "3. The output format should look as follows: \"Feedback: (write a feedback for criteria) [RESULT] (an integer number between 1 and 5)\" 4. Please do not generate any other opening, closing, and explanations." 
+ ], + "bbox": [ + 197, + 164, + 797, + 241 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "The instruction to evaluate:", + "bbox": [ + 197, + 252, + 383, + 263 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "{question}", + "bbox": [ + 197, + 265, + 261, + 279 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "##Response to evaluate:", + "bbox": [ + 197, + 290, + 351, + 301 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "{response}", + "bbox": [ + 197, + 303, + 263, + 316 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Reference Answer (Score 5):", + "bbox": [ + 197, + 327, + 393, + 340 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "{reference}", + "bbox": [ + 197, + 340, + 267, + 354 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Score Rubrics:", + "bbox": [ + 197, + 366, + 310, + 378 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "[Does the model demonstrate logical and effective reasoning in its responses?]", + "bbox": [ + 197, + 378, + 799, + 391 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Score 1: The model's responses show a complete lack of logical reasoning, often resulting in irrelevant or nonsensical answers.", + "bbox": [ + 197, + 391, + 799, + 415 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Score 2: The model occasionally shows signs of logical reasoning but generally struggles to provide coherent or relevant responses.", + "bbox": [ + 197, + 415, + 799, + 441 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Score 3: The model usually demonstrates basic reasoning capabilities, though it may not consistently apply logical principles or fully resolve complex issues.", + "bbox": [ + 197, + 441, + 799, + 465 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Score 4: The model frequently exhibits strong reasoning skills, effectively addressing complex questions with minor inconsistencies or errors.", + "bbox": [ 
+ 197, + 465, + 799, + 491 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Score 5: The model consistently demonstrates advanced reasoning abilities, providing logically sound, coherent, and sophisticated responses to complex queries.", + "bbox": [ + 197, + 491, + 799, + 517 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Feedback:", + "bbox": [ + 197, + 529, + 284, + 541 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Figure 18: Prompt for Prometheus.", + "bbox": [ + 380, + 578, + 614, + 593 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. Respond with either [Correct] or [Incorrect].", + "bbox": [ + 197, + 614, + 799, + 666 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Special considerations:", + "bbox": [ + 197, + 678, + 336, + 689 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect].", + "2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct].", + "3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content.", + "4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]." 
+ ], + "bbox": [ + 197, + 690, + 799, + 804 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Question: \"\"{question}\"\"", + "bbox": [ + 197, + 815, + 362, + 829 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Output sentence: \"\"{output}\"", + "bbox": [ + 197, + 829, + 392, + 840 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Correct answer: {answer}", + "bbox": [ + 197, + 840, + 352, + 854 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Judgement:", + "bbox": [ + 197, + 854, + 269, + 867 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Figure 19: Prompt for xVerify.", + "bbox": [ + 395, + 892, + 599, + 907 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "All models were executed on GPUs with identical configurations. Specifically, Prometheus-8x7B-v2.0, JudgeLM-33B-v1.0, CompassJudger-1-32B, xVerify-27B-I, and xVerify-32B-I were deployed on two GPUs for inference, while the remaining models were deployed on a single GPU. From Table 21, it is evident that all xVerify models exhibit an overall average runtime within 100 seconds, whereas the overall average runtime for the other judge models exceeds 100 seconds. Moreover, for each question category, the models with the shortest evaluation times are the xVerify models. Thus, the xVerify models demonstrably surpass the other judge models in terms of evaluation efficiency.", + "bbox": [ + 169, + 90, + 826, + 188 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Table 22 presents the evaluation costs incurred when employing GPT-4o as the judge, based on assessments of 200 randomly selected samples per question type, along with the overall expenditure. Apart from the prerequisite deployment overhead, the cost of invoking the xVerify models for evaluation is substantially lower than that of GPT-4o. 
Additionally, compared to GPT-4o, which relies on remote server deployment, the locally deployed xVerify models offer higher invocation efficiency. Taken together, these results underscore that the xVerify models outperform the other judge models in both usage cost and evaluation efficiency.", + "bbox": [ + 169, + 194, + 826, + 294 + ], + "page_idx": 31 + }, + { + "type": "table", + "img_path": "images/fc0db9d96b61c24a2bb9520c503fadc3531bfe8b2e6d23d36abedd249b486d06.jpg", + "table_caption": [ + "Table 21: Running Time Comparison of xVerify Models and Other Judge Models (200 Samples per Question Type). The best performance in each column is shown in **bold**, and the second-best performance is underlined." + ], + "table_footnote": [], + "table_body": "
Method TypeMethodMultiple Choice (s)Math (s)Short Answer (s)Classification (s)Avg (s)
Judge ModelPandaLM-7B-v1304.5076.2476.9765.79130.88
Auto-J-Bilingual-6B1,570.441,802.711,194.081,148.321,428.89
Auto-J-13B3,055.003,622.702,807.231,903.002,846.98
Prometheus-7B-v2.01,173.80947.71706.74696.34881.15
Prometheus-8x7B-v2.01,557.101,128.081,132.84750.511,142.13
JudgeLM-7B-v1.0551.88469.10394.57348.05440.90
JudgeLM-13B-v1.0777.73598.19564.25529.60617.44
JudgeLM-33B-v1.01,041.831,018.37789.80762.99903.25
CompassJudger-1-1.5B189.45244.08139.50110.95171.00
CompassJudger-1-7B163.96568.72450.2080.58315.87
CompassJudger-1-14B346.80571.66217.86196.18333.13
CompassJudger-1-32B147.53258.10133.59152.11172.83
xVerifyxVerify-0.5B-I38.9741.2539.1238.8739.55
xVerify-1B-I33.9136.6333.4433.4734.36
xVerify-1.5B-I43.0546.8742.1742.0843.54
xVerify-2B-I38.4473.1639.2937.3847.07
xVerify-3B-Ia38.5444.5437.1143.0240.80
xVerify-3B-Ib46.9353.58106.0647.8463.60
xVerify-7B-I68.2495.5050.6651.6766.52
xVerify-8B-I78.0661.5745.3446.8257.95
xVerify-9B-C131.0770.1651.6652.5776.37
xVerify-9B-I54.2069.9149.4151.0656.15
xVerify-14B-Ia59.18114.9155.5054.8071.10
xVerify-14B-Ib61.17145.19116.4357.5595.09
xVerify-27B-I85.2889.4158.9961.0073.67
xVerify-32B-I131.0598.9964.7467.4590.56
", + "bbox": [ + 173, + 354, + 823, + 646 + ], + "page_idx": 31 + }, + { + "type": "table", + "img_path": "images/022d0e835114f130f558611eb0eb1ecb4e306987c7a3bc23db29b6b363a9a7bb.jpg", + "table_caption": [ + "Table 22: Total costs (in USD) of GPT-4o as Judge (200 Samples per Question Type)." + ], + "table_footnote": [], + "table_body": "
MethodMultiple Choice ($)Math ($)Short Answer ($)Classification ($)Total ($)
GPT-4o as Judge0.310.660.240.271.48
GPT-4o as Judge (CoT)0.551.000.420.482.45
", + "bbox": [ + 207, + 686, + 790, + 732 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 31 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10481/29785fca-1f46-4ab1-92e1-b0b4c9aee15b_model.json b/data/2025/2504_10xxx/2504.10481/29785fca-1f46-4ab1-92e1-b0b4c9aee15b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..adf031fc4e291370577282b9a99c69fec6b7f8d5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/29785fca-1f46-4ab1-92e1-b0b4c9aee15b_model.json @@ -0,0 +1,4983 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.265, + 0.061, + 0.706 + ], + "angle": 270, + "content": "arXiv:2504.10481v1 [cs.CL] 14 Apr 2025" + }, + { + "type": "header", + "bbox": [ + 0.19, + 0.126, + 0.222, + 0.151 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.225, + 0.13, + 0.808, + 0.178 + ], + "angle": 0, + "content": "xVerify: Efficient Answer Verifier for Reasoning Model Evaluations" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.227, + 0.741, + 0.245 + ], + "angle": 0, + "content": "Ding Chen\\(^{1*}\\) Qingchen Yu\\(^{2*}\\) Pengyuan Wang\\(^{2*}\\) Wentao Zhang\\(^{3\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.263, + 0.754, + 0.28 + ], + "angle": 0, + "content": "Bo Tang² Feiyu Xiong² Xinchi Li¹ Minchuan Yang¹ Zhiyu Li²†" + }, + { + "type": "text", + "bbox": [ + 0.321, + 0.292, + 0.677, + 0.308 + ], + "angle": 0, + "content": "1 Research Institute of China Telecom, Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.344, + 0.308, + 0.654, + 0.322 + ], + "angle": 0, + "content": "2 MemTensor (Shanghai) Technology Co., Ltd." 
+ }, + { + "type": "text", + "bbox": [ + 0.327, + 0.322, + 0.672, + 0.351 + ], + "angle": 0, + "content": "3 Center for Data Science, Peking University wentao.zhang@pku.edu.cn, lizy@iaar.ac.cn" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.387, + 0.538, + 0.402 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.418, + 0.768, + 0.709 + ], + "angle": 0, + "content": "With the release of the o1 model by OpenAI, reasoning models adopting slow thinking strategies have gradually emerged. As the responses generated by such models often include complex reasoning, intermediate steps, and self-reflection, existing evaluation methods are often inadequate. They struggle to determine whether the LLM output is truly equivalent to the reference answer, and also have difficulty identifying and extracting the final answer from long, complex responses. To address this issue, we propose xVerify, an efficient answer verifier for reasoning model evaluations. xVerify demonstrates strong capability in equivalence judgment, enabling it to effectively determine whether the answers produced by reasoning models are equivalent to reference answers across various types of objective questions. To train and evaluate xVerify, we construct the VAR dataset by collecting question-answer pairs generated by multiple LLMs across various datasets, leveraging multiple reasoning models and challenging evaluation sets designed specifically for reasoning model assessment. A multi-round annotation process is employed to ensure label accuracy. Based on the VAR dataset, we train multiple xVerify models of different scales. In evaluation experiments conducted on both the test set and generalization set, all xVerify models achieve overall F1 scores and accuracy exceeding \\(95\\%\\). Notably, the smallest variant, xVerify-0.5B-I, outperforms all evaluation methods except GPT-4o, while xVerify-3B-Ib surpasses GPT-4o in overall performance. 
These results validate the effectiveness and generalizability of xVerify. All resources for xVerify are available at https://github.com/IAAR-Shanghai/xVerify." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.735, + 0.314, + 0.75 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.766, + 0.827, + 0.877 + ], + "angle": 0, + "content": "With the emergence of chain of thought (CoT) prompting [35], researchers began to explicitly encourage LLMs to generate intermediate reasoning steps, thereby enhancing their ability to handle complex tasks. Following this, OpenAI introduced the o1 model [15], which proposed the concepts of slow thinking and scaling at test time. Specifically, the model is trained to output a detailed reasoning process before generating a final answer, significantly improving its performance on complex tasks. Inspired by this paradigm, a variety of reasoning models have emerged, such as DeepSeek-R1 [3] trained with GRPO, OpenAI's o3-mini [26], and QwQ-32B [31]. However, the rise of reasoning models poses substantial challenges for evaluation. Since the outputs of these models often contain" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.886, + 0.466, + 0.9 + ], + "angle": 0, + "content": "*Equal contribution. † Corresponding authors" + }, + { + "type": "footer", + "bbox": [ + 0.172, + 0.923, + 0.315, + 0.937 + ], + "angle": 0, + "content": "Preprint. Under review." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "lengthy reasoning processes—potentially including redundant information, intermediate results, and even self-contradictions—it becomes significantly more difficult for evaluation tools to extract the final answer from such responses [2]." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.336 + ], + "angle": 0, + "content": "Developing evaluation methods tailored for LLM responses involving complex reasoning has become a key research focus. LLM reasoning is typically categorized into commonsense, logical, multihop, and mathematical reasoning [8]. Existing evaluation methods fall into automatic and human evaluation [2], with automatic evaluation gaining prominence due to its scalability and lower cost. The main automatic approaches for evaluating reasoning models include rule-based evaluation frameworks [13, 5, 27, 9, 25] and LLM-based judgment methods [20, 7, 18]. However, both approaches face limitations in reasoning model evaluation. Rule-based frameworks often struggle to extract final answers from lengthy reasoning traces, rely on strict formatting (e.g., syntactically correct LaTeX), and typically ignore the reasoning process itself—an oversimplification challenged by many researchers [36, 33, 14, 32]. Judge models are usually not optimized for reasoning evaluation and mainly produce qualitative scores or comments [7], making them more suitable for subjective questions. Objective tasks, in contrast, require accurate binary classification. Currently, effective automatic methods specifically designed for evaluating reasoning on objective questions remain lacking." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.34, + 0.828, + 0.535 + ], + "angle": 0, + "content": "To address these challenges, we introduce xVerify, an efficient LLM-answer verifier tailored for evaluating LLM responses to objective questions. xVerify processes the full LLM output, enabling it to accurately identify final answers from complex reasoning traces. It also supports robust equivalence checking, including symbol conversion (e.g., 'alpha' \\(\\rightarrow\\) 'α'), mathematical expression matching, and semantic alignment in natural language. 
Moreover, it is tolerant of formatting errors such as malformed LaTeX, making it applicable to a wide range of tasks, including math problems, multiple-choice, short-answer, and classification questions. To train and evaluate xVerify, we construct the Verify Answer for Reasoning (VAR) dataset, which includes responses from 19 LLMs across 24 reasoning benchmarks. All labels are verified through multi-round GPT-4o and human review. The dataset covers advanced reasoning models and benchmarks like GPQA, LiveMathBench, and AIME 2024. We fine-tune xVerify on a variety of base models (e.g., Qwen2.5, LLaMA, Gemma 2) and scales (0.5B-32B). Remarkably, even the smallest variant (xVerify-0.5B-I) surpasses existing evaluation methods—including 32B-sized models—on all metrics, while larger variants achieve F1 and accuracy over \\(95\\%\\) on both test and generalization sets." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.54, + 0.595, + 0.555 + ], + "angle": 0, + "content": "The main contributions of this paper are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.563, + 0.825, + 0.606 + ], + "angle": 0, + "content": "- We construct the VAR dataset, which contains answer samples from 19 LLMs across 24 evaluation benchmarks. The dataset is annotated via multiple rounds of GPT-4o and human review, and is designed for training and evaluating judge models for reasoning tasks." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.609, + 0.825, + 0.638 + ], + "angle": 0, + "content": "- We propose xVerify, an efficient answer verifier for reasoning model evaluations, and release multiple fine-tuned versions of xVerify. The checkpoints are publicly available2." 
+ }, + { + "type": "text", + "bbox": [ + 0.217, + 0.64, + 0.825, + 0.683 + ], + "angle": 0, + "content": "- We conduct comprehensive comparative evaluations against multiple existing evaluation frameworks and judge models on both test and generalization datasets, thoroughly validating the effectiveness and applicability of xVerify." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.563, + 0.825, + 0.683 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.701, + 0.323, + 0.716 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.731, + 0.827, + 0.857 + ], + "angle": 0, + "content": "Evaluation methods have always been a crucial component in the development of LLM [2]. However, the open-ended nature of LLM outputs makes it difficult to apply standardized metrics, limiting the effectiveness of traditional evaluation methods [20]. The rise of reasoning models [26, 3, 31], which often generate lengthy and complex reasoning, further complicates evaluation. For objective tasks, the main challenge is to accurately extract the final answer from the LLM's semi-structured output and compare it with the reference answer. Existing approaches are typically divided into human evaluation and automatic evaluation. While human evaluation offers flexibility, automatic methods are more cost-efficient and consistent [2]. Current automatic methods mainly include rule-based evaluation frameworks and LLM-based judgment methods." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.862, + 0.827, + 0.891 + ], + "angle": 0, + "content": "Rule-based methods are widely used in automatic evaluation frameworks such as LM Eval Harness [5], OpenCompass [27], UltraEval [9], and OpenAI Evalu [25]. 
Tools like Math-Verify [13] also follow" + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.897, + 0.706, + 0.913 + ], + "angle": 0, + "content": "\\(^{2}\\)Hugging Face collections: https://huggingface.co/collections/IAAR-Shanghai/xverify" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.827, + 0.273 + ], + "angle": 0, + "content": "this approach, extracting final answers using regular expressions (RegEx) and comparing them with reference answers. However, LLM outputs often contain final answers in varied surface forms—e.g., \"alpha\" vs. \"α\", \"A\" vs. \"a\", or \"1000\" vs. \"10³\"—which can be semantically equivalent but textually different. While some tools support limited transformations, they typically handle only LaTeX expressions or simple string patterns, and struggle with basic semantic equivalence like \"one hundred\" vs. \"100\". For reasoning models, the output is usually lengthy and involves complex reasoning steps with intermediate results. This makes it difficult for regular expressions to accurately identify the final answer, causing rule-based approaches to frequently fail in such contexts. Moreover, prior work has shown that LLMs may revise or overturn their initial predictions during extended reasoning processes, exhibiting a kind of self-reflection [32]. At the same time, rule-based methods typically ignore the reasoning process and only evaluate the final answer, which has drawn criticism from many researchers—especially in the context of reasoning models [36, 33, 14]. Thus, rule-based evaluations have limited applicability in reasoning scenarios." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.278, + 0.828, + 0.487 + ], + "angle": 0, + "content": "LLM-based judgment methods use fine-tuned LLMs to evaluate the quality of other LLMs' responses. 
Compared to traditional evaluation methods, they offer greater task adaptability, generate interpretable results, reduce evaluation costs, and can be applied across the LLM lifecycle [20, 7, 18]. For objective questions, these judge models can extract final answers from responses with intermediate reasoning or self-reflection. In recent years, many LLM-based judge models have emerged, including JudgeLM [39], PandaLM [34], Auto-J [21], Prometheus 2 [17], CompassJudger [1], CritiqueLLM [16], and Themis [12]. Judge models typically support pointwise, pairwise, and listwise evaluations [20], and some also serve as reward models in reinforcement learning. However, most are designed to assign scores to LLM outputs, making them more suitable for subjective evaluations like helpfulness, reliability, or relevance. For objective questions that require binary decisions (\"correct\" or \"incorrect\"), these models are less effective. Although scores can be binarized using thresholds, this approach is unreliable, as the models are not explicitly trained for such tasks. Moreover, the current LLM-based critic models and PRMs (Process Reward Models) exhibit subpar performance when detecting errors in long chain-of-thought responses generated by reasoning models [10]. Thus, while judge model holds promise for evaluating reasoning models, they require targeted training." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.492, + 0.827, + 0.548 + ], + "angle": 0, + "content": "In summary, automatic evaluation on objective tasks remains underdeveloped. Rule-based and LLM-based methods each have clear limitations, while human annotation is costly and hard to scale. To address these challenges, we propose xVerify, a robust and targeted judge model specifically designed for objective evaluation of LLMs." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.567, + 0.367, + 0.582 + ], + "angle": 0, + "content": "3 Problem Definition" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.599, + 0.825, + 0.64 + ], + "angle": 0, + "content": "To evaluate the correctness of LLM responses to objective questions, the key is to extract the final answer from the response and compare it with the reference answer. We formally define this evaluation task as follows:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.647, + 0.827, + 0.704 + ], + "angle": 0, + "content": "We formalize this task as a 4-tuple \\((\\mathrm{Q},\\mathrm{R},\\mathrm{A}_{\\mathrm{ref}},\\mathrm{E})\\), where \\(\\mathrm{Q} = \\{q_1,q_2,\\dots,q_n\\}\\) is the set of questions, \\(\\mathrm{R} = \\{r_1,r_2,\\dots,r_n\\mid r_i = \\mathcal{W}(q_i)\\}\\) is the set of responses generated by an LLM \\(\\mathcal{W}\\), \\(\\mathrm{A}_{\\mathrm{ref}} = \\{a_{ref}^{1},\\dots,a_{ref}^{n}\\}\\) is the set of reference answers, and \\(\\mathrm{E}:\\mathrm{Q}\\times \\mathrm{R}\\times \\mathrm{A}_{\\mathrm{ref}}\\to 0,1\\) is the evaluation function that returns 1 if the response is correct and 0 otherwise." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.71, + 0.826, + 0.78 + ], + "angle": 0, + "content": "For the stage of extracting the final answer, given a response \\( r \\) to question \\( q \\), which may include intermediate reasoning and multiple candidate answers, we denote the extracted candidates as \\( \\mathrm{A}(r) \\). 
To identify the final answer, we define a scoring function \\( \\mathrm{S} : \\mathrm{A}(r) \\times \\mathrm{Q} \\to \\mathbb{R} \\) that measures the relevance or suitability of each candidate \\( a \\in \\mathrm{A}(r) \\) to \\( q \\), and select the final answer using the extraction function:" + }, + { + "type": "equation", + "bbox": [ + 0.405, + 0.801, + 0.825, + 0.823 + ], + "angle": 0, + "content": "\\[\n\\varepsilon (q, r) = \\arg \\max _ {a \\in \\mathrm {A} (r)} \\mathrm {S} (a, q). \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.836, + 0.827, + 0.879 + ], + "angle": 0, + "content": "For the equivalence comparison stage, we define an equivalence function \\(\\psi : \\mathrm{A}_{\\mathrm{ref}} \\times \\mathrm{A}_{\\mathrm{final}} \\to \\{0,1\\}\\), where \\(\\psi\\) returns 1 if the predicted answer is equivalent to the reference, and 0 otherwise. Since answers may appear in different forms, \\(\\psi\\) integrates results from the following three sub-functions:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.914 + ], + "angle": 0, + "content": "For mathematical expressions, we define a composite normalization function \\(\\Phi_{\\mathrm{norm}}^{\\mathrm{math}} = \\phi_{\\mathrm{err}} \\circ \\phi_{\\mathrm{syn}} \\circ \\phi_{\\mathrm{alg}} \\circ \\phi_{\\mathrm{dim}}\\), where \\(\\phi_{\\mathrm{err}}\\) repairs minor syntax errors, \\(\\phi_{\\mathrm{syn}}\\) unifies syntactic structures, \\(\\phi_{\\mathrm{alg}}\\) performs" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.123 + ], + "angle": 0, + "content": "algebraic simplification, and \\(\\phi_{\\mathrm{dim}}\\) ensures consistency in physical units. 
By transforming expressions into a canonical form, \\(\\Phi_{\\mathrm{norm}}^{math}\\) enables reliable equivalence comparison:" + }, + { + "type": "equation", + "bbox": [ + 0.276, + 0.142, + 0.826, + 0.178 + ], + "angle": 0, + "content": "\\[\n\\psi_ {m a t h} \\left(a _ {r e f} ^ {m a t h}, a _ {f i n a l} ^ {m a t h}\\right) = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} \\Phi_ {\\text {n o r m}} ^ {m a t h} \\left(a _ {r e f} ^ {m a t h}\\right) = \\Phi_ {\\text {n o r m}} ^ {m a t h} \\left(a _ {f i n a l} ^ {m a t h}\\right), \\\\ 0 & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.194, + 0.825, + 0.254 + ], + "angle": 0, + "content": "For natural language answers, we define a comparison function \\(\\psi_{\\mathrm{nl}}: \\mathrm{A}_{\\mathrm{ref}}^{\\mathrm{nl}} \\times \\mathrm{A}_{\\mathrm{final}}^{\\mathrm{nl}} \\to \\{0,1\\}\\) to assess semantic equivalence. Specifically, we introduce a semantic alignment function \\(\\phi_{\\mathrm{align}}^{nl}\\) to measure the similarity between two textual answers. The equivalence decision is made by comparing the alignment score with a predefined threshold \\(\\tau\\):" + }, + { + "type": "equation", + "bbox": [ + 0.321, + 0.275, + 0.826, + 0.311 + ], + "angle": 0, + "content": "\\[\n\\psi_ {n l} \\left(a _ {r e f} ^ {n l}, a _ {f i n a l} ^ {n l}\\right) = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} \\phi_ {\\text {a l i g n}} ^ {n l} \\left(a _ {r e f} ^ {n l}, a _ {f i n a l} ^ {n l}\\right) \\geq \\tau , \\\\ 0 & \\text {o t h e r w i s e} \\end{array} \\right. 
\\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.327, + 0.827, + 0.385 + ], + "angle": 0, + "content": "For symbolic representations, we define a composite normalization function \\(\\Phi_{\\mathrm{norm}}^{sym} = \\phi_{\\mathrm{uni}} \\circ \\phi_{\\mathrm{font}} \\circ \\phi_{\\mathrm{dom}}\\) which unifies symbols by applying \\(\\phi_{\\mathrm{uni}}\\) for Unicode normalization, \\(\\phi_{\\mathrm{font}}\\) for aligning font styles, and \\(\\phi_{\\mathrm{dom}}\\) for domain-specific mappings. This produces a standardized form for character-level comparison, and the \\(\\Phi_{\\mathrm{norm}}^{sym}\\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.291, + 0.405, + 0.826, + 0.446 + ], + "angle": 0, + "content": "\\[\n\\psi_ {s y m} \\left(a _ {r e f} ^ {s y m}, a _ {f i n a l} ^ {s y m}\\right) = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} \\Phi_ {\\text {n o r m}} ^ {s y m} \\left(a _ {r e f} ^ {s y m}\\right) = \\Phi_ {\\text {n o r m}} ^ {s y m} \\left(a _ {f i n a l} ^ {s y m}\\right), \\\\ 0 & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.46, + 0.825, + 0.49 + ], + "angle": 0, + "content": "Based on the above components, we define a unified equivalence function \\(\\psi\\) to determine whether the final answer \\(a_{final}\\) matches the reference answer \\(a_{ref}\\) across different modalities. 
The definition is:" + }, + { + "type": "equation", + "bbox": [ + 0.312, + 0.51, + 0.826, + 0.58 + ], + "angle": 0, + "content": "\\[\n\\psi \\left(a _ {\\text {f i n a l}}, a _ {\\text {r e f}}\\right) = \\left\\{ \\begin{array}{l l} 1, & \\text {i f} \\psi_ {\\text {m a t h}} \\left(a _ {\\text {f i n a l}} ^ {\\text {m a t h}}, a _ {\\text {r e f}} ^ {\\text {m a t h}}\\right) = 1 \\\\ & \\quad \\wedge \\psi_ {\\text {n l}} \\left(a _ {\\text {f i n a l}} ^ {\\text {n l}}, a _ {\\text {r e f}} ^ {\\text {n l}}\\right) = 1 \\\\ & \\quad \\wedge \\psi_ {\\text {s y m}} \\left(a _ {\\text {f i n a l}} ^ {\\text {s y m}}, a _ {\\text {r e f}} ^ {\\text {s y m}}\\right) = 1; \\\\ 0, & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.596, + 0.825, + 0.642 + ], + "angle": 0, + "content": "Here, \\( a_{final}^{math}, a_{final}^{nl} \\), and \\( a_{final}^{sym} \\) represent the mathematical, natural language, and symbolic parts of the final answer, respectively, and similarly for \\( a_{ref} \\). This allows for equivalence checking in both unimodal and multimodal settings." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.647, + 0.58, + 0.66 + ], + "angle": 0, + "content": "To summarize, the overall evaluation function \\( \\mathrm{E} \\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.388, + 0.685, + 0.826, + 0.703 + ], + "angle": 0, + "content": "\\[\n\\mathrm {E} (q, r, a _ {r e f}) = \\psi (\\varepsilon (q, r), a _ {r e f}) \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.717, + 0.827, + 0.746 + ], + "angle": 0, + "content": "where \\( q \\) is the objective question, \\( r \\) is the response generated by the LLM, and \\( a_{ref} \\) is the corresponding reference answer." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.768, + 0.317, + 0.786 + ], + "angle": 0, + "content": "4 Methodology" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.801, + 0.827, + 0.913 + ], + "angle": 0, + "content": "The xVerify training and evaluation pipeline includes three main stages: collecting LLM responses, VAR dataset construction, and xVerify judge pipeline (see Figure 1). We first gather question-response pairs from various LLMs across four types of objective questions, including complex, reasoning-intensive examples. To ensure accurate labels, we employ multiple rounds of annotation and rechecking using both GPT-4o and human annotators. We also apply data augmentation to increase the dataset's diversity and complexity. Finally, we train xVerify models of different sizes on the VAR dataset to evaluate long, multi-step answers—cases that are often difficult for existing evaluation methods. Section 4.1 details the dataset construction, and Section 4.2 describes the training process." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.223, + 0.087, + 0.778, + 0.357 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.36, + 0.828, + 0.432 + ], + "angle": 0, + "content": "Figure 1: Framework of xVerify: (1) Collecting LLM Responses: aggregate responses from multiple LLMs across datasets covering four question types. (2) VAR Dataset Construction: employ GPT-4o and human annotators for labeling and rechecking, and use data augmentation to refine the dataset. (3) xVerify Judge Pipeline: accurately evaluate multi-component answers from reasoning models on challenging questions." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.453, + 0.304, + 0.467 + ], + "angle": 0, + "content": "4.1 VAR Dataset" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.481, + 0.825, + 0.565 + ], + "angle": 0, + "content": "xVerify is designed to assess the correctness of reasoning models' responses on objective questions. However, current judge models are mostly trained on tasks such as scoring or reviewing, and reasoning models with lengthy responses have only recently emerged. As a result, there is currently no suitable dataset for training xVerify. To better train and evaluate xVerify, we constructed a dedicated dataset named Verify Answer for Reasoning (VAR). Examples from the VAR dataset are provided in Appendix B.3." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.583, + 0.416, + 0.598 + ], + "angle": 0, + "content": "4.1.1 LLM Response Generation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.608, + 0.827, + 0.747 + ], + "angle": 0, + "content": "To ensure the diversity and coverage of the dataset, we selected 19 mainstream LLMs and 24 frequently used multilingual datasets to generate and collect responses. To better simulate the answering patterns of reasoning models in common evaluation scenarios, the chosen LLMs include recently released models such as the DeepSeek-R1-Distill series [3] and QwQ-32B [31]. Most of the other LLMs also support context lengths exceeding \\(32k\\) tokens, enabling them to produce answers with extended reasoning chains. The selected datasets include high-difficulty benchmarks commonly used for evaluating reasoning models, such as GPQA [28], AIME 2024 [24], MATH [11], and LiveCodeBench [23], which typically require multi-step reasoning and computation to solve. During data generation, we also retained some extremely long responses, such as those exceeding 6k characters in length. Detailed information on all LLMs and datasets is provided in Appendix A." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.825, + 0.837 + ], + "angle": 0, + "content": "To train and evaluate xVerify more effectively, we grouped the 24 datasets into four types based on question and answer formats: multiple choice, math, short answer, and classification. Multiple choice questions offer several labeled options; math includes questions where answers are mathematical expressions (e.g., numbers, equations), including mathematics and physics problems; short answer questions expect brief natural language responses like names or dates, with no strict format constraints; classification tasks involve selecting the correct label, such as for sentiment or topic classification." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.825, + 0.913 + ], + "angle": 0, + "content": "To reflect realistic evaluation settings and generate a diverse set of Q&A samples, we designed multiple prompt templates for guiding the LLMs in response generation. The prompt configurations vary along several dimensions: 0-shot vs. 5-shot, with or without CoT, and with or without answer format restrictions (restrict), resulting in eight distinct prompt types. Details of all prompt templates are provided in Appendix D.1." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.123 + ], + "angle": 0, + "content": "In total, we generated 191,600 Q&A samples using the 19 LLMs and 24 evaluation sets, providing a rich and diverse sample pool for constructing the dataset." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.134, + 0.367, + 0.15 + ], + "angle": 0, + "content": "4.1.2 Dataset Partitioning" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.158, + 0.825, + 0.188 + ], + "angle": 0, + "content": "Based on the previously collected sample pool, we constructed the training, test, and generalization sets through filtering and preprocessing." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.192, + 0.825, + 0.249 + ], + "angle": 0, + "content": "The training and test sets are used to train and evaluate the xVerify model. Both are sampled from the same pool, sharing similar distributions. Specifically, they include samples generated by 15 LLMs across 17 evaluation sets, covering the four previously mentioned question types. The training set contains 36,941 samples, and the test set includes 5,194 samples." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.254, + 0.826, + 0.326 + ], + "angle": 0, + "content": "The generalization set complements the test set by evaluating xVerify's ability to handle more diverse and challenging distributions, reflecting real-world scenarios. It consists of 5,366 samples from 7 evaluation sets not used in the training or test sets, while still spanning all four question types. These samples are generated by 19 LLMs, including 4 models not seen in training or testing, such as the reasoning model QwQ-32B, resulting in greater diversity and distribution shift." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.33, + 0.825, + 0.36 + ], + "angle": 0, + "content": "Section 4.1.4 introduces our data augmentation strategy, which adds more challenging samples to all three sets. Detailed dataset statistics are provided in Appendix B.1." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.373, + 0.349, + 0.387 + ], + "angle": 0, + "content": "4.1.3 Data Annotations" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.396, + 0.827, + 0.508 + ], + "angle": 0, + "content": "To ensure the accuracy of xVerify's training and evaluation, we conducted multiple rounds of automatic and manual annotation across the three datasets. Specifically, we used GPT-4o to perform two rounds of annotation for all samples in the datasets, utilizing two distinct prompt templates (details provided in Appendix D.2) to improve annotation confidence [33, 22]. Given the large size of the training set, we only applied manual annotation to the more challenging math problems and to samples where the two rounds of GPT-4o annotations disagreed. In contrast, for the test and generalization sets, we manually annotated all samples, resulting in a three-round annotation process to maximize label reliability. Details of the manual annotation process are provided in Appendix B.2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.522, + 0.362, + 0.537 + ], + "angle": 0, + "content": "4.1.4 Data Augmentation" + }, + { + "type": "image", + "bbox": [ + 0.226, + 0.549, + 0.778, + 0.741 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.745, + 0.825, + 0.802 + ], + "angle": 0, + "content": "Figure 2: Data Augmentation Pipelines: (1) transformation of multiple-choice options through numbering conversion and noise injection, (2) diversification of mathematical answers via equivalent expression generation, and (3) final answer sentence transformation using prompt rephrasing, symbol wrapping, and gap token insertion." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.808, + 0.827, + 0.851 + ], + "angle": 0, + "content": "To further enhance the diversity and robustness of the dataset, we designed a series of data augmentation strategies (illustrated in Figure 2) to better simulate real-world evaluation settings and improve the model's tolerance to varied answer formats." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.825, + 0.914 + ], + "angle": 0, + "content": "For multiple-choice questions, we applied two types of augmentations: option index transformation and noise injection. The former converts alphabetical labels to Arabic or Roman numerals, while the latter randomly adds or removes irrelevant distractor options without changing the original question intent, thereby increasing structural complexity." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.504, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.176 + ], + "angle": 0, + "content": "For math problems, we used two approaches: augmentation based on reference answers and LLM responses. In the first approach, we generated 3-5 mathematically equivalent expressions of each reference answer through symbolic and formal transformations, then created new samples accordingly. In the second, we applied the same transformation logic to the final answers in LLM responses, enriching the dataset with varied mathematical formats and helping the model learn equivalence across symbolic expressions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.182, + 0.827, + 0.268 + ], + "angle": 0, + "content": "We also augmented the final answer statements. Specifically, we extracted answer-bearing sentences from responses generated using restrict prompts, and applied over 1,000 transformation patterns. 
These included: 20 variations of prompt rephrasing (e.g., \"The answer is B\" \\(\\rightarrow\\) \"The most appropriate answer is B\"), 18 symbolic wrappers (e.g., wrapping B as \\(B\\)), and 5 forms of delimiter insertions (e.g., adding a colon or space before the answer). This improved diversity in answer formats and reduced overfitting to specific templates." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.275, + 0.825, + 0.346 + ], + "angle": 0, + "content": "Together, these strategies expanded the expressive space of the dataset while preserving semantic consistency, offering richer and more challenging training signals for xVerify. After augmentation, the sizes of the training, test, and generalization sets increased to 43,204, 6,122, and 6,468 samples respectively. Full dataset details are provided in Appendix B.1. The augmentation of math problems primarily relied on GPT-4o; prompt templates are listed in Appendix D.3." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.363, + 0.323, + 0.379 + ], + "angle": 0, + "content": "4.2 Model Training" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.39, + 0.825, + 0.529 + ], + "angle": 0, + "content": "We trained 14 models with different parameter sizes and architectures using the training set from the VAR dataset. Specifically, we utilized the LLaMA-Factory framework [38] and QLoRA technique [4] for model training. Based on extensive experimentation, we set the number of epochs to 1 and selected a learning rate of 1e-4 as the optimal configuration, with other hyperparameters detailed in Appendix C.1. Many researchers have pointed out potential bias in using LLMs as judge models, where models from the same family tend to receive higher ratings [19]. To thoroughly evaluate the generalization capability of the xVerify method, we trained 14 models with varying parameter sizes and architectures. 
These models ranged from 0.5B to 32B parameters and included five different families, such as LLaMA 3 [6], Qwen2.5 [37], and Gemma 2 [30]. Details of the models used are provided in Appendix C.2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.551, + 0.315, + 0.569 + ], + "angle": 0, + "content": "5 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.585, + 0.825, + 0.614 + ], + "angle": 0, + "content": "In this section, we will present the configuration, results, and detailed analysis of the xVerify model evaluation experiments. First, we will outline the experimental setup:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.628, + 0.825, + 0.684 + ], + "angle": 0, + "content": "- Datasets: The datasets used in the evaluation experiments are the test set and generalization set from the VAR dataset. The test set is used to evaluate the xVerify model's performance, while the generalization set supplements the test set by simulating real-world scenarios with a broader sample distribution to assess the model's generalization ability." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.691, + 0.825, + 0.734 + ], + "angle": 0, + "content": "- Metrics: The evaluation mainly uses accuracy and F1 score on both the test and generalization sets. Accuracy shows the model's overall performance, while the F1 score combines precision and recall for a more complete perspective." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.74, + 0.825, + 0.838 + ], + "angle": 0, + "content": "- Baselines: There are two types of baselines: evaluation frameworks and judge models. The evaluation frameworks include DeepSeek-Math [29], LM Eval Harness [5], Math-Verify [13], OpenAI Evalu [25], OpenCompass [27], and UltraEval [9]. The judge models include PandaLM [34], Auto-J [21], Prometheus 2 [17], JudgeLM [39], and CompassJudger [1]. In addition, GPT-4o is also used as a judge model with two strategies: one with CoT and one without. 
The prompts for the judge model and xVerify are provided in Appendix D.4 and Appendix D.5." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.628, + 0.825, + 0.838 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Test Set Evaluation Results. We evaluated all evaluation frameworks, judge models, and the xVerify model on the VAR test set (see Table 1). Overall, the xVerify model outperforms all evaluation frameworks and judge models, including GPT-4o, with the best and second-best values in each column appearing for the xVerify model." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.097, + 0.825, + 0.138 + ], + "angle": 0, + "content": "Table 1: Evaluation Accuracy Results on the Test Set. \"-\" indicates that the evaluation method is not applicable to the problem type. The best performance in each column will be shown in bold, and the second-best performance will be underlined." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.138, + 0.825, + 0.406 + ], + "angle": 0, + "content": "
Method TypeMethodMultiple ChoiceMathShort AnswerClassificationOverall
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
Evaluation FrameworkDeepSeek Math Verify70.77%75.17%78.34%84.30%----74.90%52.52%
LM Eval Harness58.44%68.19%25.16%28.27%53.41%44.51%72.35%66.94%47.67%48.32%
Math-Verify5.88%53.76%82.55%86.70%42.27%71.91%0.00%29.66%45.64%65.91%
OpenAI Simple Evals23.61%28.02%66.79%76.88%42.23%55.32%73.29%67.87%51.17%58.10%
OpenCompass68.11%72.52%79.25%84.73%----74.18%79.64%
UltraEval17.34%18.04%8.88%56.89%----13.95%40.71%
Judge ModelPandaLM-7B-v14.26%8.12%16.78%14.46%23.47%17.72%25.32%16.79%16.40%13.72%
Auto-J-Bilingual-6B52.85%67.71%40.76%65.21%67.22%79.60%74.86%71.37%57.04%69.59%
Auto-J-13B40.00%63.20%26.32%60.62%64.41%78.22%86.04%82.60%53.38%68.13%
Prometheus-7B-v2.075.76%75.41%74.20%74.35%70.95%74.59%84.80%77.03%76.50%75.11%
Prometheus-8x7B-v2.071.26%68.61%71.99%66.92%76.24%77.70%83.27%77.65%74.57%71.12%
JudgeLM-7B-v1.056.53%42.57%46.09%34.58%60.33%50.56%83.89%73.22%59.02%45.90%
JudgeLM-13B-v1.056.81%48.89%58.39%59.46%77.32%79.52%95.63%93.82%68.57%65.83%
JudgeLM-33B-v1.042.86%43.24%44.82%46.03%57.86%62.23%73.42%67.56%52.00%51.75%
CompassJudger-1-1.5B49.95%35.54%61.66%48.78%57.36%46.93%82.51%70.96%61.94%48.35%
CompassJudger-1-7B70.05%62.78%66.62%58.86%67.47%65.08%92.99%89.50%72.72%65.96%
CompassJudger-1-14B58.94%44.62%55.09%40.76%59.66%52.90%90.87%86.61%63.22%51.37%
CompassJudger-1-32B95.09%95.37%84.11%84.30%94.95%96.11%98.45%97.84%91.67%91.69%
GPT-4o as Judge96.61%96.75%95.27%95.80%95.01%96.20%98.14%97.43%96.25%96.39%
GPT-4o as Judge (CoT)97.10%97.23%95.41%95.88%95.63%96.63%99.56%99.38%96.85%96.95%
xVerifyxVerify-0.5B-I97.78%97.90%93.74%94.64%96.72%97.49%99.71%99.59%96.69%96.85%
xVerify-3B-Ib97.31%97.41%95.65%96.18%96.38%97.23%99.78%99.69%97.17%97.27%
xVerify-7B-I97.75%97.84%95.94%96.44%96.51%97.32%99.78%99.69%97.41%97.50%
xVerify-9B-I97.43%97.53%95.75%96.27%96.06%96.97%99.78%99.69%97.19%97.29%
xVerify-14B-Ia97.49%97.59%95.73%96.22%95.41%96.46%99.63%99.49%97.06%97.16%
xVerify-32B-I97.81%97.90%95.88%96.31%96.18%97.06%99.71%99.59%97.32%97.40%
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.442, + 0.828, + 0.54 + ], + "angle": 0, + "content": "Among the evaluation frameworks, the best performers were DeepSeek Math Verify and OpenCompass, but neither achieved an F1 score nor accuracy exceeding \\(80\\%\\). Some evaluation frameworks were also not suitable for certain question types, which is an inherent limitation of rule-based methods—strong in specificity but limited in applicability. For instance, OpenCompass was completely unsuitable for short answer and classification questions. Additionally, the long reasoning processes generated by reasoning models made it difficult for evaluation frameworks to extract final answers, lowering their overall performance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.545, + 0.828, + 0.727 + ], + "angle": 0, + "content": "Among judge models, GPT-4o and CompassJudger showed the best overall performance. The CompassJudger-1-32B model achieved F1 score and accuracy of \\(91.67\\%\\) and \\(91.69\\%\\), respectively. However, the model performed poorly on math questions, with both F1 score and accuracy below \\(85\\%\\), indicating that it handles simpler questions well but struggles with formula equivalence in math problems. Furthermore, only the 32B version of this judge model achieved over \\(90\\%\\) F1 score and accuracy, while smaller models performed below \\(80\\%\\). Therefore, the performance of CompassJudger-1-32B is more a result of the base model's capabilities rather than the subsequent training. For example, the smallest xVerify-0.5B-I model outperforms CompassJudger-1-32B across the board, indicating that the VAR training set significantly improves model evaluation performance. GPT-4o's overall performance is very close to xVerify, but the improvement after using CoT is small, with token consumption nearly doubling. 
Specifically, GPT-4o as Judge evaluated the entire test set at a cost of $13.09, while GPT-4o as Judge (CoT) cost $20.15 (using the OpenAI API, charged by token count)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.731, + 0.829, + 0.817 + ], + "angle": 0, + "content": "In contrast, even the smallest xVerify-0.5B-I model outperforms all methods except GPT-4o as Judge (CoT) in overall performance, and the xVerify-3B-Ib model surpasses all others in every evaluation metric. Moreover, for more difficult math questions, all xVerify models except xVerify-0.5B-I exceeded \\(95\\%\\) performance. We also found that the performance of the xVerify model improves as the parameter size increases, but slightly decreases after exceeding 7B parameters, likely due to overfitting on the VAR training set, which is sufficiently large for smaller models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.829, + 0.914 + ], + "angle": 0, + "content": "Generalization Set Evaluation Results. To better assess the performance of xVerify on a broader sample distribution, we evaluated all methods on the VAR generalization set, as shown in Table 2. On the generalization set, the xVerify model showed a slight decrease in overall performance. However, the drop in both F1 score and accuracy was less than \\(1.5\\%\\), while other methods showed mixed results. Overall, the xVerify model still outperformed all other methods, indicating that although" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.172, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "overfitting exists in xVerify, it is limited and the model maintains strong generalization ability on samples outside the training set distribution." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.142, + 0.825, + 0.184 + ], + "angle": 0, + "content": "Table 2: Evaluation Accuracy Results on the Generalization Set. \"--\" indicates that the evaluation method is not applicable to the problem type. The best performance in each column will be shown in bold, and the second-best performance will be underlined." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.184, + 0.825, + 0.451 + ], + "angle": 0, + "content": "
Method TypeMethodMultiple ChoiceMathShort AnswerClassificationOverall
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
Evaluation FrameworkDeepSeek Math Verify72.90%73.39%11.69%79.83%----60.57%44.42%
LM Eval Harness61.60%65.37%7.03%18.48%58.22%45.09%92.06%88.21%55.81%51.30%
Math-Verify5.19%45.10%64.18%87.68%9.12%52.75%0.00%24.59%16.10%55.53%
OpenAI Simple Evals28.72%29.23%24.31%78.90%58.33%59.58%94.39%91.62%57.99%63.36%
OpenCompass71.64%71.44%47.22%84.39%----65.74%78.18%
UltraEval16.29%15.31%13.55%78.39%----15.71%48.13%
Judge ModelPandaLM-7B-v14.28%7.85%9.91%15.97%45.81%31.43%36.23%25.99%23.74%19.14%
Auto-J-Bilingual-6B52.07%60.75%10.56%74.79%85.16%86.76%84.90%79.91%67.20%74.57%
Auto-J-13B34.87%52.78%9.86%76.54%85.12%86.97%77.67%71.99%60.43%71.35%
Prometheus-7B-v2.076.67%73.66%49.08%71.46%81.52%81.32%79.59%71.92%73.85%74.35%
Prometheus-8x7B-v2.074.13%68.60%49.48%60.27%87.15%86.13%84.70%77.19%74.51%71.69%
JudgeLM-7B-v1.060.22%45.71%12.71%15.40%72.15%62.51%86.11%76.18%59.11%46.38%
JudgeLM-13B-v1.065.39%57.80%21.61%44.87%86.11%84.53%91.78%86.89%69.18%65.63%
JudgeLM-33B-v1.046.99%45.10%20.31%39.99%71.34%66.69%41.92%33.36%46.06%46.01%
CompassJudger-1-1.5B55.75%40.87%34.53%33.62%63.93%51.57%84.49%73.93%60.01%47.65%
CompassJudger-1-7B74.31%65.20%38.27%39.89%88.99%88.15%93.29%89.29%73.47%67.47%
CompassJudger-1-14B63.65%49.50%27.63%21.20%73.61%66.48%88.97%81.92%63.10%51.21%
CompassJudger-1-32B92.93%92.32%72.05%84.91%96.81%96.86%98.05%97.05%91.90%92.04%
GPT-4o as Judge95.86%95.38%87.91%94.76%97.46%97.49%98.67%97.98%96.03%96.18%
GPT-4o as Judge (CoT)95.44%94.88%88.34%94.71%97.39%97.42%98.36%97.52%95.79%95.92%
xVerifyxVerify-0.5B-I96.49%96.10%80.00%91.94%96.95%97.00%99.03%98.53%95.29%95.53%
xVerify-3B-Ib96.21%95.71%86.20%94.15%97.60%97.63%99.03%98.53%96.08%96.23%
xVerify-7B-I96.16%95.66%87.86%94.87%97.45%97.49%98.93%98.37%96.22%96.37%
xVerify-9B-I96.06%95.55%87.47%94.76%97.53%97.56%99.13%98.68%96.23%96.38%
xVerify-14B-Ia96.11%95.60%90.20%95.74%97.32%97.35%99.13%98.68%96.53%96.65%
xVerify-32B-I96.22%95.71%90.09%95.59%97.32%97.35%99.03%98.53%96.50%96.60%
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.465, + 0.827, + 0.522 + ], + "angle": 0, + "content": "Specifically, the overall F1 score and accuracy of all evaluation frameworks remained below \\(80\\%\\) with only OpenCompass achieving an overall accuracy above \\(70\\%\\). This indicates that rule-based evaluation frameworks have significant limitations in generalization performance, struggling to effectively handle the diverse answers and evaluation sets from LLMs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.527, + 0.826, + 0.653 + ], + "angle": 0, + "content": "Among the judge models, the best-performing ones remained GPT-4o and CompassJudger. However, all judge models except GPT-4o had an F1 score below \\(75\\%\\) on math questions, with most models scoring below \\(50\\%\\), indicating that judge models almost entirely fail in evaluating more diverse and complex math problems. GPT-4o as Judge and GPT-4o as Judge (CoT) also failed to achieve an F1 score above \\(90\\%\\) on math problems, suggesting that the math samples in the generalization set indeed present challenges for evaluation methods. Furthermore, GPT-4o's performance did not improve after using CoT; instead, it showed a slight decline. This suggests that in broader scenarios, CoT-based prompt engineering methods do not effectively improve GPT-4o's performance as a judge model, and model fine-tuning may be a better option." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.658, + 0.827, + 0.756 + ], + "angle": 0, + "content": "In contrast, the xVerify-0.5B-I model outperformed all evaluation methods except GPT-4o, and the xVerify-3B-Ib model outperformed both CoT-based GPT-4o methods. For more difficult math problems, the F1 score and accuracy of the xVerify-14B-Ia and xVerify-32B-I models exceeded \\(90\\%\\). Additionally, we observed that as the parameter size of the xVerify model increased, the performance drop on the generalization set decreased. 
For example, the accuracy drop for xVerify-0.5B-I was \\(1.33\\%\\), \\(0.91\\%\\) for xVerify-9B-I, and \\(0.80\\%\\) for xVerify-32B-I, suggesting that larger xVerify models exhibit stronger generalization performance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.762, + 0.827, + 0.832 + ], + "angle": 0, + "content": "Furthermore, we comprehensively evaluated the performance of 14 x Verify models on both the test and generalization sets, and tested the computational efficiency of all x Verify and judge models, along with the evaluation cost of GPT-4o as a judge model. The results showed that x Verify models outperform other judge models in both usage cost and evaluation efficiency. Full experimental results can be found in Appendix E." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.852, + 0.303, + 0.868 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.914 + ], + "angle": 0, + "content": "In this paper, we propose an efficient answer verifier for reasoning model evaluations, named xVerify, which can effectively assess the correctness of long reasoning responses generated by" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.828, + 0.218 + ], + "angle": 0, + "content": "reasoning models on various difficult objective questions. To train and evaluate the xVerify model, we constructed the VAR dataset based on several popular LLMs and evaluation sets. This dataset primarily collects long reasoning responses generated by reasoning models on challenging questions, and multiple rounds of labeling and verification were conducted using GPT-4o and human annotators. 
Ultimately, we trained multiple xVerify models of varying specifications based on the VAR dataset and performed comparative evaluations with several evaluation frameworks and judge models on both the test and generalization sets. The experimental results show that even the smallest xVerify-0.5B-I model outperforms all methods except GPT-4o, and larger xVerify models surpass all other methods, demonstrating the effectiveness and generalization ability of xVerify." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.235, + 0.268, + 0.251 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.259, + 0.826, + 0.302 + ], + "angle": 0, + "content": "[1] Maosong Cao, Alexander Lam, Haodong Duan, Hongwei Liu, Songyang Zhang, and Kai Chen. Compassjudger-1: All-in-one judge model helps model evaluation and evolution. arXiv preprint arXiv:2410.16256, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.31, + 0.826, + 0.354 + ], + "angle": 0, + "content": "[2] Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, et al. A survey on evaluation of large language models. ACM transactions on intelligent systems and technology, 15(3):1-45, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.361, + 0.825, + 0.39 + ], + "angle": 0, + "content": "[3] DeepSeek-AI, Daya Guo, Dejian Yang, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.398, + 0.827, + 0.44 + ], + "angle": 0, + "content": "[4] Tim Dettmers, Artidoro Pagnoni, Ari Holtzman, and Luke Zettlemoyer. Qlora: Efficient finetuning of quantized llms. Advances in neural information processing systems, 36:10088-10115, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.449, + 0.826, + 0.52 + ], + "angle": 0, + "content": "[5] Leo Gao, Jonathan Tow, Baber Abbasi, Stella Biderman, Sid Black, Anthony DiPofi, Charles Foster, Laurence Golding, Jeffrey Hsu, Alain Le Noac'h, Haonan Li, Kyle McDonell, Niklas Muennighoff, Chris Ociepa, Jason Phang, Laria Reynolds, Hailey Schoelkopf, Aviya Skowron, Lintang Sutawika, Eric Tang, Anish Thite, Ben Wang, Kevin Wang, and Andy Zou. A framework for few-shot language model evaluation, September 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.528, + 0.826, + 0.543 + ], + "angle": 0, + "content": "[6] Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, et al. The llama 3 herd of models, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.551, + 0.826, + 0.593 + ], + "angle": 0, + "content": "[7] Jiawei Gu, Xuhui Jiang, Zhichao Shi, Hexiang Tan, Xuehao Zhai, Chengjin Xu, Wei Li, Yinghan Shen, Shengjie Ma, Honghao Liu, Saizhuo Wang, Kun Zhang, Yuzhuo Wang, Wen Gao, Lionel Ni, and Jian Guo. A survey on llm-as-a-judge, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.602, + 0.826, + 0.644 + ], + "angle": 0, + "content": "[8] Zishan Guo, Renren Jin, Chuang Liu, Yufei Huang, Dan Shi, Supryadi, Linhao Yu, Yan Liu, Jiaxuan Li, Bojian Xiong, and Deyi Xiong. Evaluating large language models: A comprehensive survey, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.653, + 0.826, + 0.695 + ], + "angle": 0, + "content": "[9] Chaoqun He, Renjie Luo, Shengding Hu, Yuanqian Zhao, Jie Zhou, Hanghao Wu, Jiajie Zhang, Xu Han, Zhiyuan Liu, and Maosong Sun. Ultraeval: A lightweight platform for flexible and comprehensive evaluation for llms. arXiv preprint arXiv:2404.07584, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.703, + 0.825, + 0.746 + ], + "angle": 0, + "content": "[10] Yancheng He, Shilong Li, Jiaheng Liu, Weixun Wang, Xingyuan Bu, Ge Zhang, Zhongyuan Peng, Zhaoxiang Zhang, Zhicheng Zheng, Wenbo Su, and Bo Zheng. Can large language models detect errors in long chain-of-thought reasoning?, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.754, + 0.825, + 0.81 + ], + "angle": 0, + "content": "[11] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. In J. Vanschoeren and S. Yeung, editors, Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, volume 1, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.819, + 0.827, + 0.862 + ], + "angle": 0, + "content": "[12] Xinyu Hu, Li Lin, Mingqi Gao, Xunjian Yin, and Xiaojun Wan. Themis: A reference-free nlg evaluation language model with flexibility and interpretability. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 15924-15951, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.87, + 0.827, + 0.911 + ], + "angle": 0, + "content": "[13] Greg Gandenberger Hynek Kydlíček. GitHub - huggingface/Math-Verify: A robust mathematical expression evaluation system designed for assessing Large Language Model outputs in mathematical tasks., 2024." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.259, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.176 + ], + "angle": 0, + "content": "[14] Alon Jacovi, Yonatan Bitton, Bernd Bohnet, Jonathan Herzig, Or Honovich, Michael Tseng, Michael Collins, Roee Aharoni, and Mor Geva. 
A chain-of-thought is as strong as its weakest link: A benchmark for verifiers of reasoning chains. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 4615–4634, Bangkok, Thailand, August 2024. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.184, + 0.826, + 0.226 + ], + "angle": 0, + "content": "[15] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.234, + 0.826, + 0.304 + ], + "angle": 0, + "content": "[16] Pei Ke, Bosi Wen, Zhuoer Feng, Xiao Liu, Xuanyu Lei, Jiale Cheng, Shengyuan Wang, Aohan Zeng, Yuxiao Dong, Hongning Wang, Jie Tang, and Minlie Huang. Critiquellm: Towards an informative critique generation model for evaluation of large language model generation. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.313, + 0.826, + 0.398 + ], + "angle": 0, + "content": "[17] Seungone Kim, Juyoung Suk, Shayne Longpre, Bill Yuchen Lin, Jamin Shin, Sean Welleck, Graham Neubig, Moontae Lee, Kyungjae Lee, and Minjoon Seo. *Prometheus* 2: An open source language model specialized in evaluating other language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, *Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing*, pages 4334–4353, Miami, Florida, USA, November 2024. Association for Computational Linguistics." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.405, + 0.826, + 0.449 + ], + "angle": 0, + "content": "[18] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, Kai Shu, Lu Cheng, and Huan Liu. From generation to judgment: Opportunities and challenges of llm-as-a-judge, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.457, + 0.826, + 0.499 + ], + "angle": 0, + "content": "[19] Dawei Li, Renliang Sun, Yue Huang, Ming Zhong, Bohan Jiang, Jiawei Han, Xiangliang Zhang, Wei Wang, and Huan Liu. Preference leakage: A contamination problem in llm-as-a-judge, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.507, + 0.826, + 0.537 + ], + "angle": 0, + "content": "[20] Haitao Li, Qian Dong, Junjie Chen, Huixue Su, Yujia Zhou, Qingyao Ai, Ziyi Ye, and Yiqun Liu. Llms-as-judges: A comprehensive survey on llm-based evaluation methods, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.544, + 0.826, + 0.586 + ], + "angle": 0, + "content": "[21] Junlong Li, Shichao Sun, Weizhe Yuan, Run-Ze Fan, hai zhao, and Pengfei Liu. Generative judge for evaluating alignment. In The Twelfth International Conference on Learning Representations, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.595, + 0.826, + 0.639 + ], + "angle": 0, + "content": "[22] Xun Liang, Shichao Song, Zifan Zheng, Hanyu Wang, Qingchen Yu, Xunkai Li, Rong-Hua Li, Yi Wang, Zhonghao Wang, Feiyu Xiong, and Zhiyu Li. Internal consistency and self-feedback in large language models: A survey, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.646, + 0.826, + 0.689 + ], + "angle": 0, + "content": "[23] Junnan Liu, Hongwei Liu, Linchen Xiao, Ziyi Wang, Kuikun Liu, Songyang Gao, Wenwei Zhang, Songyang Zhang, and Kai Chen. Are your llms capable of stable reasoning? arXiv preprint arXiv:2412.13147, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.697, + 0.826, + 0.727 + ], + "angle": 0, + "content": "[24] MAA. American invitational mathematics examination - aide. American Invitational Mathematics Examination - AIME 2024, February 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.734, + 0.826, + 0.764 + ], + "angle": 0, + "content": "[25] OpenAI. GitHub - openai/evals: Evals is a framework for evaluating LLMs and LLM systems, and an open-source registry of benchmarks., 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.771, + 0.422, + 0.787 + ], + "angle": 0, + "content": "[26] OpenAI. Openai o3-mini, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.795, + 0.826, + 0.825 + ], + "angle": 0, + "content": "[27] OpenMMLab. Opencompass: A universal evaluation platform for foundation models. https://github.com/open-compass/opencompass, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.833, + 0.826, + 0.875 + ], + "angle": 0, + "content": "[28] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.883, + 0.826, + 0.913 + ], + "angle": 0, + "content": "[29] Zhihong Shao, Peiyi Wang, Qihao Zhu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models, 2024." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "[30] Gemma Team, Morgane Riviere, Shreya Pathak, et al. Gemma 2: Improving open language models at a practical size, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.129, + 0.775, + 0.146 + ], + "angle": 0, + "content": "[31] Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.153, + 0.827, + 0.224 + ], + "angle": 0, + "content": "[32] Xinpeng Wang, Bolei Ma, Chengzhi Hu, Leon Weber-Genzel, Paul Röttger, Frauke Kreuter, Dirk Hovy, and Barbara Plank. \"my answer is C\": First-token probabilities do not match text answers in instruction-tuned language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 7407–7416, Bangkok, Thailand, August 2024. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.232, + 0.825, + 0.275 + ], + "angle": 0, + "content": "[33] Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.283, + 0.825, + 0.326 + ], + "angle": 0, + "content": "[34] Yidong Wang, Zhuohao Yu, Zhengran Zeng, Linyi Yang, Cunxiang Wang, Hao Chen, Chaoya Jiang, Rui Xie, Jindong Wang, Xing Xie, Wei Ye, Shikun Zhang, and Yue Zhang. Pandalm: An automatic evaluation benchmark for llm instruction tuning optimization. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.334, + 0.827, + 0.405 + ], + "angle": 0, + "content": "[35] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian richter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 24824-24837. 
Curran Associates, Inc., 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.413, + 0.827, + 0.483 + ], + "angle": 0, + "content": "[36] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian richter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 24824-24837. Curran Associates, Inc., 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.492, + 0.825, + 0.521 + ], + "angle": 0, + "content": "[37] An Yang, Baosong Yang, Beichen Zhang, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.53, + 0.825, + 0.601 + ], + "angle": 0, + "content": "[38] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. Llamafactory: Unified efficient fine-tuning of \\(100+\\) language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), Bangkok, Thailand, 2024. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.609, + 0.827, + 0.651 + ], + "angle": 0, + "content": "[39] Lianghui Zhu, Xinggang Wang, and Xinlong Wang. JudgeLM: Fine-tuned large language models are scalable judges. In The Thirteenth International Conference on Learning Representations, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.651 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.088, + 0.343, + 0.117 + ], + "angle": 0, + "content": "Appendices" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.148, + 0.826, + 0.163 + ], + "angle": 0, + "content": "A Datasets and Models 14" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.181, + 0.825, + 0.196 + ], + "angle": 0, + "content": "B VAR Dataset Details 14" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.148, + 0.826, + 0.196 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.202, + 0.825, + 0.218 + ], + "angle": 0, + "content": "B.1 Details of Training, Test, and Generalization Sets 15" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.223, + 0.825, + 0.238 + ], + "angle": 0, + "content": "B.2 Details of Human Annotation 19" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.244, + 0.825, + 0.259 + ], + "angle": 0, + "content": "B.3 Examples from the VAR Dataset 21" + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.202, + 0.825, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.277, + 0.826, + 0.292 + ], + "angle": 0, + "content": "C Model Training Details 22" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.298, + 0.825, + 0.314 + ], + "angle": 0, + "content": "C.1 Training Hyperparameters 22" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.318, + 0.825, + 0.334 + ], + "angle": 0, + "content": "C.2 Original Model Details 22" + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.298, + 0.825, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.351, + 0.826, + 0.367 + ], + "angle": 0, + "content": "D Prompts 22" + }, + { + "type": "text", + "bbox": [ + 0.197, + 
0.372, + 0.825, + 0.388 + ], + "angle": 0, + "content": "D.1 Prompts for Generating LLM Responses 22" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.393, + 0.825, + 0.408 + ], + "angle": 0, + "content": "D.2 Prompts for GPT-4o Annotation 23" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.414, + 0.825, + 0.429 + ], + "angle": 0, + "content": "D.3 Prompts for Data Augmentation 23" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.435, + 0.825, + 0.45 + ], + "angle": 0, + "content": "D.4 Prompts for Judge Model 23" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.455, + 0.825, + 0.471 + ], + "angle": 0, + "content": "D.5 Prompts for xVerify 25" + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.372, + 0.825, + 0.471 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.488, + 0.826, + 0.504 + ], + "angle": 0, + "content": "E Supplementary Experimental Results 25" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.51, + 0.825, + 0.524 + ], + "angle": 0, + "content": "E.1 Evaluation Accuracy Results of All xVerify Models 25" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.53, + 0.825, + 0.546 + ], + "angle": 0, + "content": "E.2 Computational Efficiency and Operational Cost of xVerify and Judge Models 26" + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.51, + 0.825, + 0.546 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.176, + 0.09, + 0.383, + 0.107 + ], + "angle": 0, + "content": "A Datasets and Models" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.132, + 0.825, + 0.162 + ], + "angle": 0, + "content": "This section will present the relevant information for all the public datasets and LLMs involved in the experiments of this paper." 
+ }, + { + "type": "text", + "bbox": [ + 0.176, + 0.167, + 0.826, + 0.28 + ], + "angle": 0, + "content": "In this study, we employ a total of 24 datasets, which are categorized into four primary types: multiple-choice questions (Choice), short answer questions (Short Answer), mathematical problems (Math), and classification tasks (Classification), as summarized in Table 3. To evaluate the multilingual capabilities of the xVerify model, each question type includes datasets in both Chinese and English, with one dataset featuring multilingual content. For each dataset, samples are partitioned into training and test sets following a 2:1 ratio, with the training and test sets ideally comprising 2,000 and 1,000 instances, respectively. In certain cases, the number of available samples is below 3,000, or the official test set is not publicly available, resulting in reduced dataset sizes after preprocessing." + }, + { + "type": "table_caption", + "bbox": [ + 0.176, + 0.308, + 0.825, + 0.352 + ], + "angle": 0, + "content": "Table 3: Datasets Description. The \"Type\" column indicates the question type in the corresponding dataset, including multiple-choice questions (Choice), short answer questions (Short Answer), math questions (Math), and classification questions (Classification)." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.359, + 0.825, + 0.727 + ], + "angle": 0, + "content": "
DatasetType#Train#TestLanguageLicense
CMMLUChoice20001000ChineseCC-BY-NC-4.0
C-EvalChoice1346260ChineseCC-BY-NC-SA-4.0
GPQAChoice794398EnglishCC-BY-4.0
MMLUChoice18161000EnglishMIT
MMLU-ProChoice20001000EnglishMIT
MMLU-RutexChoice20001000EnglishCC-BY-4.0
AgNewsClassification20001000EnglishUnspecified
AmazonClassification20001000EnglishApache-2.0
CLUEWSCClassification15481000ChineseUnspecified
CMNLIClassification20001000ChineseApache-2.0
AMC23Math2614EnglishUnspecified
AIME 2024Math2010EnglishMIT
CMATHMath1128565ChineseCC-BY-4.0
GSM8KMath20001000EnglishMIT
LiveMathBenchMath19093English & ChineseCC-BY-4.0
MATHMath20001000EnglishMIT
MGSMMath1892946MultilingualCC-BY-SA-4.0
OlympiadBenchMath1787892English & ChineseApache-2.0
ARCShort Answer20001000EnglishCC-BY-SA-4.0
CHIDShort Answer20001000ChineseApache-2.0
C-SimpleQAShort Answer20001000ChineseCC-BY-NC-SA-4.0
DROPShort Answer20001000EnglishCC-BY-SA-4.0
FRAMESShort Answer550274EnglishApache-2.0
SimpleQAShort Answer20001000EnglishMIT
" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.758, + 0.825, + 0.8 + ], + "angle": 0, + "content": "A total of 19 large language models (LLMs) are utilized in our experiments, encompassing a diverse range of model sizes and types, with a particular emphasis on reasoning models (see Table 4). These models are subsequently used to collect LLM-generated responses and to train the xVerify model." + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.839, + 0.379, + 0.856 + ], + "angle": 0, + "content": "B VAR Dataset Details" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.883, + 0.825, + 0.912 + ], + "angle": 0, + "content": "This section will present detailed information about the components of the VAR dataset, the details of human annotations, and examples from the dataset." + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.088, + 0.828, + 0.117 + ], + "angle": 0, + "content": "Table 4: LLMs Description. LLMs are listed by release date. All models are chat or instruct type. \"NaN\" indicates that public data is unavailable." + }, + { + "type": "table", + "bbox": [ + 0.24, + 0.124, + 0.759, + 0.417 + ], + "angle": 0, + "content": "
Model#Para.TypePublisherDate
ChatGLM3-6B6BChatTsinghua2023.10
GPT-4oNaNChatOpenAI2024.05
Gemma-2-2B-it2BInstructGoogle2024.06
Gemma-2-9B-it9BInstructGoogle2024.06
GLM-4-9B-Chat9BChatTsinghua2024.06
InternLM2.5-7B-Chat7BChatShLab2024.06
Qwen2-1.5B-Instruct1.5BInstructAlibaba2024.06
Qwen2-7B-Instruct7BInstructAlibaba2024.06
Llama-3.1-8B-Instruct8BInstructMeta2024.07
Llama-3.2-1B-Instruct1BInstructMeta2024.09
Llama-3.2-3B-Instruct3BInstructMeta2024.09
Qwen2.5-7B-Instruct7BInstructAlibaba2024.09
Qwen2.5-14B-Instruct14BInstructAlibaba2024.09
Phi-414BChatMicrosoft2024.11
DeepSeek-R1-Distill-Llama-8B8BDistillDeepSeek2025.01
DeepSeek-R1-Distill-Qwen-1.5B1.5BDistillDeepSeek2025.01
DeepSeek-R1-Distill-Qwen-7B7BDistillDeepSeek2025.01
DeepSeek-R1-Distill-Qwen-14B14BDistillDeepSeek2025.01
QwQ-32B32BInstructAlibaba2025.03
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.445, + 0.557, + 0.46 + ], + "angle": 0, + "content": "B.1 Details of Training, Test, and Generalization Sets" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.472, + 0.317, + 0.487 + ], + "angle": 0, + "content": "B.1.1 Training Set" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.497, + 0.825, + 0.54 + ], + "angle": 0, + "content": "The training set comprises 43,204 samples. Tables 5 to 8 provide the sample counts corresponding to each LLM, dataset, prompt template, and question type. Note that datasets with names containing \"_enh\" refer to the augmented multiple choice question datasets." + }, + { + "type": "table_caption", + "bbox": [ + 0.289, + 0.555, + 0.707, + 0.571 + ], + "angle": 0, + "content": "Table 5: Number of samples from each LLM in the training set." + }, + { + "type": "table", + "bbox": [ + 0.325, + 0.577, + 0.673, + 0.815 + ], + "angle": 0, + "content": "
ModelSample Counts
ChatGLM3-6B2588
GPT-4o2691
Gemma-2-2B-it2657
Gemma-2-9B-it2600
GLM-4-9B-Chat2957
InternLM2.5-7B-Chat2935
Qwen2-1.5B-Instruct2700
Qwen2-7B-Instruct2898
LLaMA-3.1-8B-Instruct2852
Qwen2.5-7B-Instruct2854
Qwen2.5-14B-Instruct2801
DeepSeek-R1-Distill-Llama-8B3223
DeepSeek-R1-Distill-Qwen-1.5B3231
DeepSeek-R1-Distill-Qwen-7B3075
DeepSeek-R1-Distill-Qwen-14B3142
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.845, + 0.285, + 0.859 + ], + "angle": 0, + "content": "B.1.2 Test Set" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "The test set comprises 6,122 samples. Tables 9 to 12 provide the sample counts corresponding to each LLM, dataset, prompt template, and question type. Note that datasets with names containing \"_enh\" refer to the augmented multiple choice question datasets." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.283, + 0.118, + 0.713, + 0.133 + ], + "angle": 0, + "content": "Table 6: Number of samples from each dataset in the training set." + }, + { + "type": "table", + "bbox": [ + 0.368, + 0.14, + 0.631, + 0.475 + ], + "angle": 0, + "content": "
DatasetSample Counts
CMMLU1557
CMMLU_enh1641
GPQA1587
GPQA_enh1668
MMLU1520
MMLU_enh1513
MMLU-Pro1394
MMLU-Pro_enh1442
AgNews1751
CLUEWSC5008
AMC231625
AIME 20241333
CMATH1893
GSM8K1836
MATH2485
MGSM1384
OlympiadBench_en2573
OlympiadBench_zh2709
CHID2424
C-SimpleQA1913
DROP1928
FRAMES2020
" + }, + { + "type": "table_caption", + "bbox": [ + 0.252, + 0.541, + 0.744, + 0.557 + ], + "angle": 0, + "content": "Table 7: Number of samples from each prompt template in the training set." + }, + { + "type": "table", + "bbox": [ + 0.372, + 0.563, + 0.625, + 0.706 + ], + "angle": 0, + "content": "
Prompt TemplateSample Counts
0-shot4884
0-shot-restrict5977
0-shot-cot4907
0-shot-cot-restrict6041
5-shot4774
5-shot-restrict5866
5-shot-cot4916
5-shot-cot-restrict5839
" + }, + { + "type": "table_caption", + "bbox": [ + 0.262, + 0.771, + 0.735, + 0.787 + ], + "angle": 0, + "content": "Table 8: Number of samples from each question type in the training set." + }, + { + "type": "table", + "bbox": [ + 0.381, + 0.793, + 0.617, + 0.88 + ], + "angle": 0, + "content": "
DatasetSample Counts
Multiple Choice12322
Math15838
Short Answer8285
Classification6759
" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.303, + 0.102, + 0.695, + 0.117 + ], + "angle": 0, + "content": "Table 9: Number of samples from each LLM in the test set." + }, + { + "type": "table", + "bbox": [ + 0.325, + 0.125, + 0.675, + 0.362 + ], + "angle": 0, + "content": "
ModelSample Counts
ChatGLM3-6B378
GPT-4o400
Gemma-2-2B-it416
Gemma-2-9B-it369
GLM-4-9B-Chat367
InternLM2.5-7B-Chat367
Qwen2-1.5B-Instruct433
Qwen2-7B-Instruct427
LLaMA-3.1-8B-Instruct404
Qwen2.5-7B-Instruct374
Qwen2.5-14B-Instruct415
DeepSeek-R1-Distill-Llama-8B430
DeepSeek-R1-Distill-Qwen-1.5B451
DeepSeek-R1-Distill-Qwen-7B439
DeepSeek-R1-Distill-Qwen-14B452
" + }, + { + "type": "table_caption", + "bbox": [ + 0.293, + 0.397, + 0.704, + 0.412 + ], + "angle": 0, + "content": "Table 10: Number of samples from each dataset in the test set." + }, + { + "type": "table", + "bbox": [ + 0.368, + 0.419, + 0.631, + 0.753 + ], + "angle": 0, + "content": "
DatasetSample Counts
CMMLU216
CMMLU_enh195
GPQA207
GPQA_enh235
MMLU225
MMLU_enh222
MMLU-Pro171
MMLU-Pro_enh192
AgNews261
CLUEWSC710
AMC23258
AIME 2024186
CMATH263
GSM8K262
MATH362
MGSM205
OlympiadBench_en349
OlympiadBench_zh446
CHID347
C-SimpleQA270
DROP265
FRAMES275
" + }, + { + "type": "table_caption", + "bbox": [ + 0.263, + 0.787, + 0.735, + 0.803 + ], + "angle": 0, + "content": "Table 11: Number of samples from each prompt template in the test set." + }, + { + "type": "table", + "bbox": [ + 0.381, + 0.81, + 0.618, + 0.896 + ], + "angle": 0, + "content": "
DatasetSample Counts
Multiple Choice1663
Math2331
Short Answer1157
Classification971
" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.272, + 0.088, + 0.725, + 0.104 + ], + "angle": 0, + "content": "Table 12: Number of samples from each question type in the test set." + }, + { + "type": "table", + "bbox": [ + 0.372, + 0.11, + 0.625, + 0.253 + ], + "angle": 0, + "content": "
Prompt TemplateSample Counts
0-shot680
0-shot-restrict798
0-shot-cot642
0-shot-cot-restrict891
5-shot690
5-shot-restrict789
5-shot-cot702
5-shot-cot-restrict930
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.28, + 0.359, + 0.294 + ], + "angle": 0, + "content": "B.1.3 Generalization Set" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.306, + 0.828, + 0.351 + ], + "angle": 0, + "content": "The generalization set comprises 6,468 samples. Tables 13 to 16 provide the sample counts corresponding to each LLM, dataset, prompt template, and question type. Note that datasets with names containing \"_enh\" refer to the augmented multiple choice question datasets." + }, + { + "type": "table_caption", + "bbox": [ + 0.263, + 0.364, + 0.733, + 0.379 + ], + "angle": 0, + "content": "Table 13: Number of samples from each LLM in the generalization set." + }, + { + "type": "table", + "bbox": [ + 0.324, + 0.386, + 0.675, + 0.681 + ], + "angle": 0, + "content": "
ModelSample Counts
ChatGLM3-6B300
GPT-4o305
Gemma-2-2B-it427
Gemma-2-9B-it296
GLM-4-9B-Chat339
InternLM2.5-7B-Chat341
Qwen2-1.5B-Instruct280
Qwen2-7B-Instruct346
LLaMA-3.1-8B-Instruct400
LLaMA-3.2-1B-Instruct314
LLaMA-3.2-3B-Instruct310
Qwen2.5-7B-Instruct326
Qwen2.5-14B-Instruct334
Phi-4314
DeepSeek-R1-Distill-Llama-8B341
DeepSeek-R1-Distill-Qwen-1.5B399
DeepSeek-R1-Distill-Qwen-7B375
DeepSeek-R1-Distill-Qwen-14B434
QwQ-32B287
" + }, + { + "type": "table_caption", + "bbox": [ + 0.258, + 0.707, + 0.741, + 0.722 + ], + "angle": 0, + "content": "Table 14: Number of samples from each dataset in the generalization set." + }, + { + "type": "table", + "bbox": [ + 0.368, + 0.728, + 0.631, + 0.898 + ], + "angle": 0, + "content": "
DatasetSample Counts
C-Eval435
C-Eval_enh442
MMLU-Redux436
MMLU-Redux_enh483
Amazon646
CMNLI643
LiveMathBench_en1127
LiveMathBench_zh821
ARC807
SimpleQA628
" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.227, + 0.088, + 0.772, + 0.104 + ], + "angle": 0, + "content": "Table 15: Number of samples from each prompt template in the generalization set." + }, + { + "type": "table", + "bbox": [ + 0.38, + 0.111, + 0.619, + 0.197 + ], + "angle": 0, + "content": "
DatasetSample Counts
Multiple Choice1796
Math1948
Short Answer1435
Classification1289
" + }, + { + "type": "table_caption", + "bbox": [ + 0.236, + 0.208, + 0.761, + 0.223 + ], + "angle": 0, + "content": "Table 16: Number of samples from each question type in the generalization set." + }, + { + "type": "table", + "bbox": [ + 0.373, + 0.231, + 0.625, + 0.372 + ], + "angle": 0, + "content": "
Prompt TemplateSample Counts
0-shot703
0-shot-restrict856
0-shot-cot772
0-shot-cot-restrict915
5-shot690
5-shot-restrict885
5-shot-cot756
5-shot-cot-restrict891
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.397, + 0.423, + 0.411 + ], + "angle": 0, + "content": "B.2 Details of Human Annotation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.422, + 0.828, + 0.507 + ], + "angle": 0, + "content": "To ensure high-quality annotation for the VAR dataset, we assembled a team of 8 annotators. Among them, 6 hold bachelor's degrees and are primarily responsible for batch annotation tasks, while the other 2 hold master's degrees and focus on reviewing complex cases or resolving discrepancies in annotations made by multiple annotators. The gender ratio within the annotation team is balanced at 1:1. In terms of compensation, all annotators were paid according to the local industry average rates. The annotation process lasted for three weeks, covering a total of 15 working days." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.518, + 0.825, + 0.778 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.33, + 0.786, + 0.667, + 0.802 + ], + "angle": 0, + "content": "Figure 3: Illustration of the Label Studio Interface." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.828, + 0.913 + ], + "angle": 0, + "content": "The detailed annotation guidelines are presented below. Figure 3 shows an example of the interface used in our annotation tool. Each sample to be annotated contains four fields: question, LLM output, correct answer, and answer range. The question type includes four categories: multiple choice, math, short answer, and classification. Annotators are required to judge whether the LLM output matches the correct answer based on the question, while the answer range serves as auxiliary reference information to support the decision-making process. 
The specific annotation instructions and criteria are as follows:" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.56, + 0.108 + ], + "angle": 0, + "content": "Answer evaluation criteria for different question types:" + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.119, + 0.348, + 0.132 + ], + "angle": 0, + "content": "- Multiple Choice" + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.133, + 0.827, + 0.175 + ], + "angle": 0, + "content": "For multiple-choice questions, answer options may be labeled with letters (A, B, C, D, ...) Roman numerals (I, II, III, IV, ...), or Arabic numerals (1, 2, 3, 4, ...). The LLM output is considered correct if it provides:" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.179, + 0.46, + 0.193 + ], + "angle": 0, + "content": "- Only the correct option label;" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.196, + 0.476, + 0.21 + ], + "angle": 0, + "content": "- Only the correct option content;" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.212, + 0.492, + 0.226 + ], + "angle": 0, + "content": "- Both the correct label and content." + }, + { + "type": "list", + "bbox": [ + 0.246, + 0.179, + 0.492, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.23, + 0.825, + 0.286 + ], + "angle": 0, + "content": "In cases where the label and content are inconsistent, the content takes precedence. If the content is correct, the answer is marked as correct; if the content is incorrect, the answer is marked as incorrect, even if the option label is correct (see the final annotation example for reference)." 
+ }, + { + "type": "title", + "bbox": [ + 0.217, + 0.291, + 0.333, + 0.303 + ], + "angle": 0, + "content": "- Short Answer" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.305, + 0.825, + 0.332 + ], + "angle": 0, + "content": "Short-answer questions may require responses such as names, locations, numbers, dates, or full sentences. The evaluation criteria are:" + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.337, + 0.802, + 0.352 + ], + "angle": 0, + "content": "- For concise answers (e.g., names, places, dates), strict string matching is required." + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.353, + 0.826, + 0.368 + ], + "angle": 0, + "content": "- For sentence-level answers, semantic consistency with the reference answer is required." + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.369, + 0.825, + 0.398 + ], + "angle": 0, + "content": "- For numerical answers, mathematical equivalence must be verified (e.g., \"12000\" and \"12,000\" are considered equivalent)." + }, + { + "type": "list", + "bbox": [ + 0.245, + 0.337, + 0.826, + 0.398 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.402, + 0.33, + 0.415 + ], + "angle": 0, + "content": "- Classification" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.416, + 0.825, + 0.445 + ], + "angle": 0, + "content": "Classification questions come with a fixed set of candidate answers. The LLM output must explicitly and exactly match the correct answer in this set to be judged as correct." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.449, + 0.274, + 0.46 + ], + "angle": 0, + "content": "Math" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.462, + 0.825, + 0.491 + ], + "angle": 0, + "content": "For mathematical questions, the final answer in the LLM output must be mathematically equivalent to the reference answer. 
Evaluation criteria include:" + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.495, + 0.825, + 0.522 + ], + "angle": 0, + "content": "- If an initial answer (ans1) is given but followed by a derived final answer (ans2) through calculation, ans2 should be used for evaluation." + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.525, + 0.825, + 0.553 + ], + "angle": 0, + "content": "- If the LLM output or ground-truth answer is provided in LaTeX format and cannot be visually interpreted, a LaTeX compiler should be used to determine equivalence." + }, + { + "type": "list", + "bbox": [ + 0.245, + 0.495, + 0.825, + 0.553 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.566, + 0.272, + 0.58 + ], + "angle": 0, + "content": "Special cases:" + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.593, + 0.4, + 0.607 + ], + "angle": 0, + "content": "Overly Long Responses" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.607, + 0.827, + 0.648 + ], + "angle": 0, + "content": "If the LLM output is excessively long, use the final answer provided as the basis for judgment. If the response does not converge to a clear answer (e.g., repeated changes or ambiguity), it should be marked as incorrect." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.653, + 0.399, + 0.666 + ], + "angle": 0, + "content": "- Truncated Calculations" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.667, + 0.826, + 0.709 + ], + "angle": 0, + "content": "In long responses where the final verification or calculation is truncated, it can be ignored. If a clear answer was provided earlier, use it for evaluation; if not, mark the response as incorrect (see the second-to-last annotation example)." 
+ }, + { + "type": "title", + "bbox": [ + 0.217, + 0.713, + 0.379, + 0.726 + ], + "angle": 0, + "content": "Unfamiliar Domains" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.727, + 0.825, + 0.769 + ], + "angle": 0, + "content": "If the correctness of the LLM response cannot be determined due to unfamiliar domain knowledge (e.g., mathematical expressions whose equivalence is hard to judge), the case may be skipped and will later be annotated by more qualified annotators." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.773, + 0.464, + 0.786 + ], + "angle": 0, + "content": "- Irrelevant but Matched Answers" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.787, + 0.825, + 0.815 + ], + "angle": 0, + "content": "If the LLM output is irrelevant to the question but coincidentally contains the correct final answer, it should be marked as incorrect." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.092, + 0.444, + 0.107 + ], + "angle": 0, + "content": "B.3 Examples from the VAR Dataset" + }, + { + "type": "code", + "bbox": [ + 0.171, + 0.119, + 0.832, + 0.821 + ], + "angle": 0, + "content": "{ \"dataset\": \"SimpleQA\", \"question\": \"In which year did Fayaz A. Malik (an Indian pharmacologist, cancer biologist, and scientist) receive the Young Scientist of the Year from the Council of Scientific and Industrial Research?\", \"question_type\": \"short answer\", \"correct_answer\": \"2009\", \"llm_output\": \"The year Fayaz A. Malik received the Young Scientist of the Year award from the Council of Scientific and Industrial Research was 2001\\. \\textbackslash textbackslash n\\textbackslash textbackslash nThe answer is 2001.\", \"humanjudge_result\": \"Incorrect\" } { \"dataset\": \"MMLU-Redux_enh\", \"question\": \"If you wanted to find the global distribution of coal , you would use a Answer Choices: (I) reference map. 
(II) topographic map. (III) thematic map. (IV) location map.\", \"question_type\": \"multiple choice\", \"correct_answer\": \"III\", \"llm_output\": \"The answer is (III) thematic map.\", \"humanjudge_result\": \"Correct\" } { \"dataset\": \"Amazon\", \"question\": \"Lightsaber Action!!: This game is absolutely the best game I have ever seen! there is a little bit of violence, but most of the time you kill droids, not humans. My favorite weapon is the lightsaber, because you can deflect laser blasts\\. \\textbackslash textbackslash n Please identify the sentiment polarity of the sentence: positive or negative\", \"question_type\": \"classification\", \"correct_answer\": \"positive\", \"llm_output\": \"The answer is positive.\", \"humanjudge_result\": \"Correct\" } { \"dataset\": \"GSM8K\", \"question\": \"Daisy and Rose were enjoying their backyard pool with their dogs. If there are 24 legs/paws in the pool, how many dogs do Daisy and Rose have?\", \"question_type\": \"math\", \"correct_answer\": \"5\", \"llm_output\": \"To find the number of dogs Daisy and Rose have, we need to first determine the total number of people and then subtract that from the total number of legs/paws\\.n\\sSince each person has 2 legs, we can divide the total number of legs/paws by 2 to find the number of people\\.n24 legs/paws / 2 legs/person = 12 people\\n\\sSince Daisy and Rose are the only people in the pool, we can subtract them from the total number of people to find the number of dogs\\.n12 people - 2 people = 10 dogs\\n\\nTherefore, Daisy and Rose have 10 dogs\\.\". 
\"humanjudge_result\": \"Incorrect\" }" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.406, + 0.108 + ], + "angle": 0, + "content": "C Model Training Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.121, + 0.797, + 0.138 + ], + "angle": 0, + "content": "This section will further present additional information about the training of the xVerify model." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.152, + 0.406, + 0.168 + ], + "angle": 0, + "content": "C.1 Training Hyperparameters" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.178, + 0.825, + 0.22 + ], + "angle": 0, + "content": "The xVerify model is trained using the QLoRA method, with consistent hyperparameter settings across all base models. The training is carried out on multiple GPU servers. Table 17 presents the key training hyperparameters." + }, + { + "type": "table", + "bbox": [ + 0.319, + 0.253, + 0.656, + 0.41 + ], + "angle": 0, + "content": "
HyperparameterSetting
Per Device Train Batch Size1
Gradient Accumulation Steps8
Learning Rate1.0e-4
Num Train Epochs1.0
LrScheduler Typecosine
Warmup Ratio0.1
Bf16true
Ddp Timeout180000000
Lora Rank8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.32, + 0.231, + 0.677, + 0.247 + ], + "angle": 0, + "content": "Table 17: Hyperparameter settings for model training." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.432, + 0.377, + 0.448 + ], + "angle": 0, + "content": "C.2 Original Model Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.458, + 0.825, + 0.501 + ], + "angle": 0, + "content": "This paper uses 14 original models of different parameter scales and types for training on the VAR dataset. Table 18 presents the relevant information for all xVerify models and their corresponding original models." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.511, + 0.825, + 0.54 + ], + "angle": 0, + "content": "Table 18: Details of Original Models and Corresponding xVerify Models. Sorted by Original Model Name." + }, + { + "type": "table", + "bbox": [ + 0.228, + 0.546, + 0.77, + 0.77 + ], + "angle": 0, + "content": "
Original Model#Para.TypeContext LengthxVerify Model
Gemma-2-2B-it2BInstruct8KxVerify-2B-I
Gemma-2-9B-it9BInstruct8KxVerify-9B-I
Gemma-2-27B-it27BInstruct8KxVerify-27B-I
GLM-4-9B-Chat9BChat128KxVerify-9B-C
Llama-3.2-1B-Instruct1BInstruct128KxVerify-1B-I
Llama-3.2-3B-Instruct3BInstruct128KxVerify-3B-Ia
Llama-3.1-8B-Instruct8BInstruct128KxVerify-8B-I
Phi-414BInstruct16kxVerify-14B-Ib
Qwen2.5-0.5B-Instruct0.5BInstruct128KxVerify-0.5B-I
Qwen2.5-1.5B-Instruct1.5BInstruct128KxVerify-1.5B-I
Qwen2.5-3B-Instruct3BInstruct128KxVerify-3B-Ib
Qwen2.5-7B-Instruct7BInstruct128KxVerify-7B-I
Qwen2.5-14B-Instruct14BInstruct128KxVerify-14B-Ia
Qwen2.5-32B-Instruct32BInstruct128KxVerify-32B-I
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.796, + 0.283, + 0.814 + ], + "angle": 0, + "content": "D Prompts" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.744, + 0.844 + ], + "angle": 0, + "content": "This section will present all the prompt templates used in the experiments of this paper." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.858, + 0.502, + 0.874 + ], + "angle": 0, + "content": "D.1 Prompts for Generating LLM Responses" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.914 + ], + "angle": 0, + "content": "The prompt templates used to generate LLM responses are illustrated in Figures 4 to 7. Each template consists of four fields that need to be populated: \"task_type\", \"task_description\", \"examples\", and" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.827, + 0.218 + ], + "angle": 0, + "content": "\"question\". The \"task_type\" and \"task_description\" fields are determined based on the type of question. For instance, for questions from the GPQA dataset, \"task_type\" is set to \"multidisciplinary question\", and \"task_description\" is set to \"Please choose the answer from options A to D, corresponding to the question.\" During dataset preprocessing, we design appropriate \"task_type\" and \"task_description\" values for each dataset. The \"examples\" field is filled according to the selected prompting strategy, either 0-shot or 5-shot. In the 0-shot setting, this field is left empty, while in the 5-shot setting, it is populated with five example question-answer pairs that are similar to the target \"question\". The \"question\" field contains the specific query to be answered by the LLM. Examples of the \"examples\" and \"question\" fields are shown in Figures 8 and 9, respectively." 
+ }, + { + "type": "code", + "bbox": [ + 0.197, + 0.24, + 0.51, + 0.281 + ], + "angle": 0, + "content": "You are an expert in {task_type}, {task_description} \n{examples} \n{question}" + }, + { + "type": "image_caption", + "bbox": [ + 0.305, + 0.306, + 0.692, + 0.322 + ], + "angle": 0, + "content": "Figure 4: Few-shot prompt for generating LLM responses." + }, + { + "type": "code", + "bbox": [ + 0.197, + 0.353, + 0.519, + 0.418 + ], + "angle": 0, + "content": "You are an expert in {task_type}, {task_description} \n{examples} \n{question} \nEnd your final answer with 'The answer is ." + }, + { + "type": "image_caption", + "bbox": [ + 0.28, + 0.443, + 0.718, + 0.459 + ], + "angle": 0, + "content": "Figure 5: Few-shot-restrict prompt for generating LLM responses." + }, + { + "type": "code", + "bbox": [ + 0.197, + 0.49, + 0.51, + 0.556 + ], + "angle": 0, + "content": "You are an expert in {task_type}, {task_description} \n{examples} \n{question} \nLet's think step by step." + }, + { + "type": "image_caption", + "bbox": [ + 0.292, + 0.581, + 0.704, + 0.596 + ], + "angle": 0, + "content": "Figure 6: Few-shot-cot prompt for generating LLM responses." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.621, + 0.442, + 0.635 + ], + "angle": 0, + "content": "D.2 Prompts for GPT-4o Annotation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.646, + 0.825, + 0.703 + ], + "angle": 0, + "content": "The prompt templates used for annotating the collected LLM question-answer pairs with GPT-4o during the construction of the VAR dataset are shown in Figures 10 and 11. Both of these prompt templates employ the Chain-of-Thought (CoT) strategy to ensure the accuracy of the annotations generated by GPT-4o." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.719, + 0.442, + 0.734 + ], + "angle": 0, + "content": "D.3 Prompts for Data Augmentation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.744, + 0.827, + 0.773 + ], + "angle": 0, + "content": "In constructing the VAR dataset, two prompt templates used to guide GPT-4o in augmenting mathematical question samples are presented in Figures 12 and 13." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.789, + 0.396, + 0.804 + ], + "angle": 0, + "content": "D.4 Prompts for Judge Model" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.825, + 0.913 + ], + "angle": 0, + "content": "In the experiments of this paper, the prompts used for all judge models were constructed based on the official templates provided by their respective developers. However, for some judge models, the official prompt templates were not fully compatible with the evaluation tasks in this paper, so other similar prompt templates were used. Specifically, Figure 14 shows the prompt template used by GPT-4o as Judge, Figure 15 shows the prompt template used by GPT-4o as Judge (CoT), Figure 16 shows the prompt template used by JudgeLM series models and PandaLM-7B-v1, Figure 17 shows the prompt template used by Auto-J series models, and Figure 18 shows the prompt template used" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.197, + 0.132, + 0.51, + 0.172 + ], + "angle": 0, + "content": "You are an expert in {task_type}, {task_description} \n{examples} \n{question}" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.183, + 0.341, + 0.197 + ], + "angle": 0, + "content": "Let's think step by step." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.208, + 0.518, + 0.221 + ], + "angle": 0, + "content": "End your final answer with 'The answer is ." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.267, + 0.247, + 0.729, + 0.262 + ], + "angle": 0, + "content": "Figure 7: Few-shot-cot-restrict prompt for generating LLM responses." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.345, + 0.437, + 0.358 + ], + "angle": 0, + "content": "***** Start In-Context Examples ****" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.359, + 0.8, + 0.434 + ], + "angle": 0, + "content": "Q: A late game rally by Washington led them to the Eagles' 26 yard line. A shot to the end zone by Robert Griffin III would be intercepted by Brandon Boykin, clinching an Eagles win. The Eagles would move to 6-5. This is the Eagles first win at Lincoln Financial Field since Week 4 of the 2012 season, because prior to this game, the Eagles had never won a game in their home stadium in 414 days since that same week, snapping a 10-game losing streak at home with this win. How many more wins than losses did the Eagles have after this game?" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.435, + 0.316, + 0.446 + ], + "angle": 0, + "content": "A: The answer is 1." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.459, + 0.8, + 0.535 + ], + "angle": 0, + "content": "Q: The population of Sevastopol proper is 418,987 (01.01.16), making it the largest in the Crimean Peninsula. The city's agglomeration has about 600,000 people (2015). According to the Ukrainian Census (2001), the ethnic groups of Sevastopol include Russians (71.6%), Ukrainians (22.4%), Belarusians (1.6%), Tatars (0.7%), Crimean Tatars (0.5%), Armenians (0.3%), Jews (0.3%), Moldovans (0.2%), and Azerbaijani people (0.2%). Which ethnic has a higher percentage of the population in Sevastopol: Russians or Armenians?" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.536, + 0.36, + 0.547 + ], + "angle": 0, + "content": "A: The answer is Russians." 
+ }, + { + "type": "text", + "bbox": [ + 0.199, + 0.56, + 0.8, + 0.647 + ], + "angle": 0, + "content": "Q: the most common crimes in the ACT are property related crimes, unlawful entry with intent and motor vehicle theft. They affected 2,304 and 966 people (580 and 243 per 100,000 persons respectively). Homicide and related offences—murder, attempted murder and manslaughter, but excluding driving causing death and conspiracy to murder—affect 1.0 per 100,000 persons, which is below the national average of 1.9 per 100,000. Rates of sexual assault (64.4 per 100,000 persons) are also below the national average (98.5 per 100,000). Which was there a higher national average for, homicide and related offences or sexual assault?" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.649, + 0.389, + 0.66 + ], + "angle": 0, + "content": "A: The answer is sexual assault." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.673, + 0.8, + 0.723 + ], + "angle": 0, + "content": "Q: In the county, the population was spread out with \\(21.7\\%\\) under the age of 18, \\(8.5\\%\\) from 18 to 24, \\(26.9\\%\\) from 25 to 44, \\(27.7\\%\\) from 45 to 64, and \\(15.0\\%\\) who were 65 years of age or older. The median age was 40 years. For every 100 females, there were 94.4 males. For every 100 females age 18 and over, there were 98.7 males. How many percent were not from 45 to 64?" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.724, + 0.334, + 0.735 + ], + "angle": 0, + "content": "A: The answer is 72.3." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.748, + 0.8, + 0.811 + ], + "angle": 0, + "content": "Q: The median age in the city was 35.1 years. \\(24.2\\%\\) of residents were under the age of 18; \\(7.9\\%\\) were between the ages of 18 and 24; \\(33.8\\%\\) were from 25 to 44; \\(24.6\\%\\) were from 45 to 64; and \\(9.5\\%\\) were 65 years of age or older. The gender makeup of the city was \\(48.6\\%\\) male and \\(51.4\\%\\) females. 
How many more people, in terms of percentage, were in the largest age group compared to the second smallest?" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.812, + 0.334, + 0.823 + ], + "angle": 0, + "content": "A: The answer is 24.3." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.824, + 0.433, + 0.837 + ], + "angle": 0, + "content": "***** End In-Context Examples ****" + }, + { + "type": "image_caption", + "bbox": [ + 0.365, + 0.863, + 0.631, + 0.878 + ], + "angle": 0, + "content": "Figure 8: Example of \"examples\" fields." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.197, + 0.101, + 0.8, + 0.171 + ], + "angle": 0, + "content": "Q: Let \\(ABCD\\) be a tetrahedron such that \\(AB = CD = \\sqrt{41}\\), \\(AC = BD = \\sqrt{80}\\), and \\(BC = AD = \\sqrt{89}\\). There exists a point \\(I\\) inside the tetrahedron such that the distances from \\(I\\) to each of the faces of the tetrahedron are all equal. This distance can be written in the form \\(\\frac{m\\sqrt{n}}{p}\\), where \\(m, n\\), and \\(p\\) are positive integers, \\(m\\) and \\(p\\) are relatively prime, and \\(n\\) is not divisible by the square of any prime. Find \\(m + n + p\\)." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.172, + 0.217, + 0.182 + ], + "angle": 0, + "content": "A:" + }, + { + "type": "image_caption", + "bbox": [ + 0.368, + 0.207, + 0.629, + 0.223 + ], + "angle": 0, + "content": "Figure 9: Example of \"question\" fields." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.248, + 0.8, + 0.276 + ], + "angle": 0, + "content": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. Think step by step as you make your evaluation." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.287, + 0.8, + 0.326 + ], + "angle": 0, + "content": "You will receive a question, an output sentence, and the correct answer. 
Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. Think step by step and respond with either [Correct] or [Incorrect]." + }, + { + "type": "title", + "bbox": [ + 0.198, + 0.337, + 0.338, + 0.349 + ], + "angle": 0, + "content": "Special considerations:" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.35, + 0.8, + 0.388 + ], + "angle": 0, + "content": "1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect]." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.388, + 0.8, + 0.413 + ], + "angle": 0, + "content": "2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct]." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.413, + 0.8, + 0.438 + ], + "angle": 0, + "content": "3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.438, + 0.8, + 0.464 + ], + "angle": 0, + "content": "4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]." 
+ }, + { + "type": "list", + "bbox": [ + 0.198, + 0.35, + 0.8, + 0.464 + ], + "angle": 0, + "content": null + }, + { + "type": "code", + "bbox": [ + 0.198, + 0.465, + 0.553, + 0.58 + ], + "angle": 0, + "content": "Please present your response in the following JSON format: { \"reasoning\": \"Your step-by-step reasoning here.\", \"judgment\": \"Correct or Incorrect\" } Question: \"\"{question}\"\" Output sentence: \"\"{output}\"\" Correct answer: {answer}" + }, + { + "type": "image_caption", + "bbox": [ + 0.353, + 0.613, + 0.644, + 0.629 + ], + "angle": 0, + "content": "Figure 10: Prompt I for GPT-4o annotation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.657, + 0.827, + 0.7 + ], + "angle": 0, + "content": "by Prometheus series models. The official prompt template for the CompassJudger-1 series models corresponds to pairwise evaluation, so the prompt template used by this series is the same as that for the xVerify model, as shown in Figure 19." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.718, + 0.357, + 0.734 + ], + "angle": 0, + "content": "D.5 Prompts for xVerify" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.745, + 0.828, + 0.803 + ], + "angle": 0, + "content": "Figure 19 shows the prompt template used to construct the input for the xVerify model. This template is used both for training and evaluation of the xVerify model. Specifically, \"question,\" \"output,\" and \"answer\" correspond to the question content, the LLM response, and the reference answer, respectively." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.824, + 0.524, + 0.842 + ], + "angle": 0, + "content": "E Supplementary Experimental Results" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.857, + 0.571, + 0.872 + ], + "angle": 0, + "content": "E.1 Evaluation Accuracy Results of All xVerify Models" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.826, + 0.914 + ], + "angle": 0, + "content": "Tables 19 and 20 present the performance of all \\(14\\mathrm{x}\\) Verify models on the test set and the generalization set, respectively. Overall, each xVerify model achieves an F1 score and accuracy exceeding \\(96.5\\%\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.197, + 0.1, + 0.8, + 0.127 + ], + "angle": 0, + "content": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. Think step by step as you make your evaluation." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.138, + 0.8, + 0.163 + ], + "angle": 0, + "content": "We request your feedback on whether the model's response correctly answers the user question above. Follow these steps to make your evaluation:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.164, + 0.525, + 0.176 + ], + "angle": 0, + "content": "1. Think step by step: Read the user question carefully." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.177, + 0.738, + 0.188 + ], + "angle": 0, + "content": "2. Think step by step: Review the reference answer and understand the key points it covers." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.189, + 0.662, + 0.201 + ], + "angle": 0, + "content": "3. Think step by step: Compare the model's answer with the reference answer." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.202, + 0.799, + 0.227 + ], + "angle": 0, + "content": "4. 
Think step by step: Determine if the model's answer addresses the key points in the reference answer and correctly answers the question." + }, + { + "type": "list", + "bbox": [ + 0.198, + 0.164, + 0.799, + 0.227 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.238, + 0.8, + 0.264 + ], + "angle": 0, + "content": "First, provide your reasoning in detail. Then, clearly state your judgment as either \"Correct\" or \"Incorrect.\"" + }, + { + "type": "code", + "bbox": [ + 0.197, + 0.265, + 0.553, + 0.379 + ], + "angle": 0, + "content": "Please present your response in the following JSON format: \n{ \"reasoning\": \"Your step-by-step reasoning here.\", \"judgment\": \"Correct or Incorrect\" \n} \nQuestion: {question} \nReference Answer: {answer} \nModel's Answer: {output}" + }, + { + "type": "image_caption", + "bbox": [ + 0.35, + 0.404, + 0.645, + 0.419 + ], + "angle": 0, + "content": "Figure 11: Prompt II for GPT-4o annotation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.447, + 0.825, + 0.476 + ], + "angle": 0, + "content": "on the test set and over \\(95.52\\%\\) on the generalization set. These results demonstrate not only the effectiveness of the xVerify models for evaluation tasks but also the high quality of the VAR dataset." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.481, + 0.825, + 0.58 + ], + "angle": 0, + "content": "A comparison between the results on the two datasets shows that the performance on the generalization set experiences a slight decline relative to the test set, with the decrease not exceeding \\(1.6\\%\\). Moreover, models with larger parameter sizes exhibit smaller performance drops. This indicates that the xVerify models possess strong generalization capabilities, which further improve with an increase in parameter scale. 
Additionally, it is observed across both datasets that while the performance of xVerify models generally enhances with the increment of parameter size, beyond a certain threshold, further increases in parameter scale do not lead to additional performance gains." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.602, + 0.825, + 0.629 + ], + "angle": 0, + "content": "Table 19: Evaluation Accuracy Results on the Test Set: All xVerify Models. The best performance in each column is shown in **bold**, and the second-best performance is underlined." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.63, + 0.822, + 0.814 + ], + "angle": 0, + "content": "
xVerify ModelMultiple ChoiceMathShort AnswerClassificationTotal
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
xVerify-0.5B-I97.78%97.90%93.74%94.64%96.72%97.49%99.71%99.59%96.69%96.85%
xVerify-1B-I97.22%97.35%94.76%95.45%96.06%96.97%99.71%99.59%96.77%96.91%
xVerify-1.5B-I97.85%97.96%95.10%95.75%96.05%96.97%99.63%99.49%97.05%97.17%
xVerify-2B-I97.93%98.02%95.06%95.71%96.06%96.97%99.78%99.69%97.09%97.21%
xVerify-3B-Ia97.73%97.84%95.00%95.67%96.17%97.06%99.71%99.59%97.02%97.14%
xVerify-3B-Ib97.31%97.41%95.65%96.18%96.38%97.23%99.78%99.69%97.17%97.27%
xVerify-7B-I97.75%97.84%95.94%96.44%96.51%97.32%99.78%99.69%97.41%97.50%
xVerify-8B-I97.92%98.02%95.34%95.97%96.05%96.97%99.71%99.59%97.17%97.29%
xVerify-9B-C98.29%98.38%95.26%95.88%96.06%96.97%99.78%99.69%97.25%97.37%
xVerify-9B-I97.43%97.53%95.75%96.27%96.06%96.97%99.78%99.69%97.19%97.29%
xVerify-14B-Ia97.49%97.59%95.73%96.22%95.41%96.46%99.63%99.49%97.06%97.16%
xVerify-14B-Ib97.67%97.78%96.10%96.57%95.74%96.72%99.71%99.59%97.31%97.40%
xVerify-27B-I97.81%97.90%95.46%96.01%96.19%97.06%99.56%99.38%97.15%97.26%
xVerify-32B-I97.81%97.90%95.88%96.31%96.18%97.06%99.71%99.59%97.32%97.40%
" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.843, + 0.753, + 0.859 + ], + "angle": 0, + "content": "E.2 Computational Efficiency and Operational Cost of xVerify and Judge Models" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Table 21 displays the running time performance of the xVerify model and other judge models. Each model was evaluated using 200 randomly selected samples per question type from the generalization set, with running times measured in seconds. This data provides insights into the computational" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.198, + 0.215, + 0.796, + 0.253 + ], + "angle": 0, + "content": "You are an expert in mathematical calculations and data expressions. You are required to provide different equivalent forms of the standard answer for the following math problem." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.254, + 0.348, + 0.266 + ], + "angle": 0, + "content": "Problem: {question}" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.266, + 0.324, + 0.278 + ], + "angle": 0, + "content": "Answer: {answer}" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.291, + 0.277, + 0.303 + ], + "angle": 0, + "content": "Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.304, + 0.802, + 0.341 + ], + "angle": 0, + "content": " Problem: Let $ \\alpha \\beta \\gamma be the radian measure of the smallest angle in a $3-4-5$ right triangle. Let $ \\alpha \\beta \\gamma be the radian measure of the smallest angle in a $7-24-25$ right triangle. Express $ \\alpha \\beta \\gamma in terms of $ \\alpha \\beta \\gamma$." 
+ }, + { + "type": "text", + "bbox": [ + 0.199, + 0.342, + 0.456, + 0.354 + ], + "angle": 0, + "content": " Answer: \\\\frac{\\backslashpi}{2} - 2\\alpha" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.355, + 0.254, + 0.366 + ], + "angle": 0, + "content": "Output:" + }, + { + "type": "code", + "bbox": [ + 0.201, + 0.367, + 0.523, + 0.43 + ], + "angle": 0, + "content": "```\n\"\\\"json {\n \"answer1\": \"\\\"\\pi/2 - 2\\alpha\", \n \"answer2\": \"pi/2 - 2\\alpha\", \n \"answer3\": \"pi/2 - 2 * \\alpha\", \n \"answer4\": \"0.5 * \\pi - 2 * \\alpha\"\n}\");" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.442, + 0.277, + 0.454 + ], + "angle": 0, + "content": "Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.455, + 0.81, + 0.504 + ], + "angle": 0, + "content": "Problem: A volcano erupts and spews ash into the sky. The ash cloud spreads out in a diameter eighteen times as far as the distance it shot up into the sky. If the ashes erupted three hundred feet into the sky, what was the radius of the ash cloud in feet?" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.506, + 0.294, + 0.516 + ], + "angle": 0, + "content": "Answer: 2700" + }, + { + "type": "code", + "bbox": [ + 0.199, + 0.518, + 0.57, + 0.606 + ], + "angle": 0, + "content": "Output:\n```\n\"\\\"json {\n \"answer1\": \"2.7×10^3\",\n \"answer2\": \"2700.0\",\n \"answer3\": \"2.7 \\times times 10^3\",\n \"answer4\": \"$2.7 \\times times 10^3$\",\n \"answer5\": \"Two thousand seven hundred\"}''\n```" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.618, + 0.293, + 0.629 + ], + "angle": 0, + "content": "Please note:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.631, + 0.705, + 0.643 + ], + "angle": 0, + "content": "1. You need to provide 3 to 5 different standard forms of the answer" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.644, + 0.771, + 0.667 + ], + "angle": 0, + "content": "2. 
Each different form must be equivalent to the standard answer, i.e., it should still be a correct and valid answer." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.668, + 0.763, + 0.693 + ], + "angle": 0, + "content": "3. You may use LaTeX, scientific notation, or other standard mathematical expressions." + }, + { + "type": "code", + "bbox": [ + 0.199, + 0.694, + 0.67, + 0.743 + ], + "angle": 0, + "content": "4. Please follow the JSON format below for the output:\n```\n\"\\\"json {\n \"answer1\": \"xxx\", \"answer2\": \"xxx\", \"answer3\": \"xxx\", ...\n}...\"" + }, + { + "type": "image_caption", + "bbox": [ + 0.28, + 0.78, + 0.717, + 0.795 + ], + "angle": 0, + "content": "Figure 12: Prompt for Generating Alternative Reference Answers." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "code", + "bbox": [ + 0.171, + 0.215, + 0.828, + 0.763 + ], + "angle": 0, + "content": "You are an expert in mathematical calculations and data expressions. For an answer to a specific mathematical problem, you are required to provide equivalent and different expressions of the mathematical result. Answer: {output} \nExample 1: Answer: The answer is \\(\\beta = \\backslash\\) frac{pi{2}-2\\alpha}. Output: \"'json { \"answer1\": \"The answer is \\(\\backslash\\) pi/2 - 2\\alpha}. , \"answer2\": \"The answer is pi/2 - 2\\alpha}. , \"answer3\": \"The answer is pi/2 - 2\\* alpha.\", \"answer4\": \"The answer is \\(0.5*\\) pi-2\\* alpha.\" }\"\" \nExample 2: Answer: The answer is 2700 feet. Output: \"'json { \"answer1\": \"The answer is \\(2.7\\times 10^{-3}\\) feet.\", \"answer2\": \"The answer is 2700.0 feet.\", \"answer3\": \"The answer is 2.7 times \\(10^{-3}\\) feet.\", \"answer4\": \"The answer is \\(\\$ 2.7\\) times \\(10^{-3}\\{3\\}\\) feet.\", \"answer5\": \"The answer is Two thousand seven hundred feet.\" }\"\" \nPlease note: 1. 
You need to provide 3 to 5 different expressions, each replacing the mathematical result with an equivalent and different form. 2. Each expression must be exactly equivalent to the target answer to ensure its correctness. 3. You can use LaTeX, scientific notation, or other standard mathematical formats. 4. Please output the result in the following JSON format: \"'json { \"answer1\": \"The answer is xxx\", \"answer2\": \"The answer is xxx\", \"answer3\": \"The answer is xxx\", \"answer4\": \"The answer is xxx\", \"answer5\": \"The answer is xxx\" }\"\"" + }, + { + "type": "image_caption", + "bbox": [ + 0.269, + 0.773, + 0.726, + 0.79 + ], + "angle": 0, + "content": "Figure 13: Prompt for Generating Diverse Final Answer Expressions." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.197, + 0.129, + 0.8, + 0.182 + ], + "angle": 0, + "content": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. Respond with either [Correct] or [Incorrect]." + }, + { + "type": "title", + "bbox": [ + 0.198, + 0.193, + 0.338, + 0.205 + ], + "angle": 0, + "content": "Special considerations:" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.206, + 0.799, + 0.244 + ], + "angle": 0, + "content": "1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect]." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.244, + 0.8, + 0.269 + ], + "angle": 0, + "content": "2. 
**Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct]." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.269, + 0.799, + 0.294 + ], + "angle": 0, + "content": "3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.295, + 0.798, + 0.319 + ], + "angle": 0, + "content": "4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]." + }, + { + "type": "list", + "bbox": [ + 0.198, + 0.206, + 0.8, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.32, + 0.553, + 0.332 + ], + "angle": 0, + "content": "Please present your response in the following JSON format:" + }, + { + "type": "code", + "bbox": [ + 0.198, + 0.333, + 0.445, + 0.372 + ], + "angle": 0, + "content": "{\" judgement\": \"Correct or Incorrect\"}" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.381, + 0.363, + 0.394 + ], + "angle": 0, + "content": "Question: \"\"{question}\"\"" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.395, + 0.406, + 0.407 + ], + "angle": 0, + "content": "Output sentence: \"\"{response}\"\"" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.408, + 0.368, + 0.42 + ], + "angle": 0, + "content": "Correct answer: {reference}" + }, + { + "type": "image_caption", + "bbox": [ + 0.365, + 0.445, + 0.631, + 0.461 + ], + "angle": 0, + "content": "Figure 14: Prompt for GPT-4o as Judge." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.537, + 0.8, + 0.59 + ], + "angle": 0, + "content": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. Think step by step as you make your evaluation. 
You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. Think step by step and respond with either [Correct] or [Incorrect]." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.601, + 0.338, + 0.613 + ], + "angle": 0, + "content": "Special considerations:" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.614, + 0.799, + 0.651 + ], + "angle": 0, + "content": "1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect]." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.652, + 0.8, + 0.677 + ], + "angle": 0, + "content": "2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct]." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.677, + 0.799, + 0.702 + ], + "angle": 0, + "content": "3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.702, + 0.799, + 0.727 + ], + "angle": 0, + "content": "4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]." 
+ }, + { + "type": "list", + "bbox": [ + 0.198, + 0.614, + 0.8, + 0.727 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.728, + 0.553, + 0.74 + ], + "angle": 0, + "content": "Please present your response in the following JSON format:" + }, + { + "type": "code", + "bbox": [ + 0.198, + 0.741, + 0.523, + 0.793 + ], + "angle": 0, + "content": "\"reasoning\": \"Your step-by-step reasoning here.\", \"judgement\": \"Correct or Incorrect\"" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.801, + 0.363, + 0.815 + ], + "angle": 0, + "content": "Question: \"\"{question}\"\"" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.815, + 0.406, + 0.828 + ], + "angle": 0, + "content": "Output sentence: \"\"{response}\"" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.828, + 0.368, + 0.84 + ], + "angle": 0, + "content": "Correct answer: {reference}" + }, + { + "type": "image_caption", + "bbox": [ + 0.342, + 0.865, + 0.655, + 0.881 + ], + "angle": 0, + "content": "Figure 15: Prompt for GPT-4o as Judge (CoT)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "code", + "bbox": [ + 0.197, + 0.099, + 0.8, + 0.305 + ], + "angle": 0, + "content": "You are a helpful and precise assistant for checking the quality of the answer. \n[Question] \n{question} \n[Reference Answer] \n{reference} \n[Model's Answer] \n{response} \n[System] \nWe would like to request your feedback on the performance of the model's response to the user question displayed above. \nBased on the reference answer, please rate the accuracy of the response. The model receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance. \nPlease first output a single line containing only the score. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias. 
\n```java\n```java\nYou are a helpful and precise assistant for checking the quality of the answer. \n[Question] \n{question} \n[Reference Answer] \n{reference} \n[Model's Answer] \n{response} \n[System]" + }, + { + "type": "image_caption", + "bbox": [ + 0.389, + 0.328, + 0.608, + 0.344 + ], + "angle": 0, + "content": "Figure 16: Prompt for JudgeLM." + }, + { + "type": "code", + "bbox": [ + 0.199, + 0.372, + 0.8, + 0.4 + ], + "angle": 0, + "content": "[INST] Write critiques for a submitted response on a given user's query, incorporating the correct answer as a reference, and grade the response accordingly:" + }, + { + "type": "code", + "bbox": [ + 0.197, + 0.41, + 0.8, + 0.576 + ], + "angle": 0, + "content": "[BEGIN DATA] \n\\*\\*\\* \n[Query]: {question} \n\\*\\*\\* \n[Correct Answer]: {reference} \n\\*\\*\\* \n[Response]: {response} \n\\*\\*\\* \n[END DATA] \nWrite critiques for this response. After that, you should give a final rating for the response on a scale of 1 to 10 by strictly following this format: \"[rating]\", for example: \"Rating: [[5]]\". [/INST]" + }, + { + "type": "image_caption", + "bbox": [ + 0.398, + 0.599, + 0.599, + 0.615 + ], + "angle": 0, + "content": "Figure 17: Prompt for Auto-J." + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.64, + 0.825, + 0.668 + ], + "angle": 0, + "content": "Table 20: Evaluation Accuracy Results on the Generalization Set: All xVerify Models. The best performance in each column is shown in **bold**, and the second-best performance is **underlined**." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.669, + 0.823, + 0.853 + ], + "angle": 0, + "content": "
xVerify ModelMultiple ChoiceMathShort AnswerClassificationTotal
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
xVerify-0.5B-I96.49%96.10%80.00%91.94%96.95%97.00%99.03%98.53%95.29%95.53%
xVerify-1B-I96.10%95.66%82.45%92.51%97.32%97.35%98.92%98.37%95.43%95.62%
xVerify-1.5B-I96.76%96.38%83.58%93.12%97.46%97.49%98.88%98.29%95.85%96.03%
xVerify-2B-I96.27%95.82%82.11%92.51%97.60%97.63%98.98%98.45%95.57%95.75%
xVerify-3B-Ia96.44%95.99%86.10%94.25%97.31%97.35%99.03%98.53%96.11%96.27%
xVerify-3B-Ib96.21%95.71%86.20%94.15%97.60%97.63%99.03%98.53%96.08%96.23%
xVerify-7B-I96.16%95.66%87.86%94.87%97.45%97.49%98.93%98.37%96.22%96.37%
xVerify-8B-I96.67%96.27%86.76%94.61%97.45%97.49%99.03%98.53%96.33%96.49%
xVerify-9B-C97.00%96.66%87.08%94.71%97.45%97.49%98.98%98.45%96.45%96.61%
xVerify-9B-I96.06%95.55%87.47%94.76%97.53%97.56%99.13%98.68%96.23%96.38%
xVerify-14B-Ia96.11%95.60%90.20%95.74%97.32%97.35%99.13%98.68%96.53%96.65%
xVerify-14B-Ib96.35%95.88%87.88%94.92%97.45%97.49%98.93%98.37%96.30%96.44%
xVerify-27B-I96.01%95.49%85.64%93.99%97.32%97.35%99.13%98.68%95.93%96.09%
xVerify-32B-I96.22%95.71%90.09%95.59%97.32%97.35%99.03%98.53%96.50%96.60%
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.914 + ], + "angle": 0, + "content": "efficiency of each model under uniform testing conditions, thereby facilitating a comparative analysis of their real-time processing capabilities and scalability in practical applications." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.198, + 0.101, + 0.8, + 0.127 + ], + "angle": 0, + "content": "You are a fair judge assistant tasked with providing clear, objective feedback based on specific criteria, ensuring each assessment reflects the absolute standards set for performance.\"" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.128, + 0.329, + 0.14 + ], + "angle": 0, + "content": "Task Description:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.14, + 0.799, + 0.165 + ], + "angle": 0, + "content": "An instruction (might include an Input inside it), a response to evaluate, a reference answer that gets a score of 5, and a score rubric representing a evaluation criteria are given." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.165, + 0.799, + 0.19 + ], + "angle": 0, + "content": "1. Write a detailed feedback that assess the quality of the response strictly based on the given score rubric, not evaluating in general." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.19, + 0.799, + 0.215 + ], + "angle": 0, + "content": "2. After writing a feedback, write a score that is an integer between 1 and 5. You should refer to the score rubric." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.215, + 0.799, + 0.242 + ], + "angle": 0, + "content": "3. The output format should look as follows: \"Feedback: (write a feedback for criteria) [RESULT] (an integer number between 1 and 5)\" 4. Please do not generate any other opening, closing, and explanations." 
+ }, + { + "type": "list", + "bbox": [ + 0.199, + 0.165, + 0.799, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.253, + 0.384, + 0.265 + ], + "angle": 0, + "content": "The instruction to evaluate:" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.266, + 0.263, + 0.28 + ], + "angle": 0, + "content": "{question}" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.291, + 0.352, + 0.303 + ], + "angle": 0, + "content": "##Response to evaluate:" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.304, + 0.264, + 0.317 + ], + "angle": 0, + "content": "{response}" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.328, + 0.394, + 0.341 + ], + "angle": 0, + "content": "Reference Answer (Score 5):" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.341, + 0.268, + 0.355 + ], + "angle": 0, + "content": "{reference}" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.367, + 0.312, + 0.379 + ], + "angle": 0, + "content": "Score Rubrics:" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.379, + 0.8, + 0.392 + ], + "angle": 0, + "content": "[Does the model demonstrate logical and effective reasoning in its responses?]" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.392, + 0.8, + 0.416 + ], + "angle": 0, + "content": "Score 1: The model's responses show a complete lack of logical reasoning, often resulting in irrelevant or nonsensical answers." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.416, + 0.8, + 0.442 + ], + "angle": 0, + "content": "Score 2: The model occasionally shows signs of logical reasoning but generally struggles to provide coherent or relevant responses." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.442, + 0.8, + 0.467 + ], + "angle": 0, + "content": "Score 3: The model usually demonstrates basic reasoning capabilities, though it may not consistently apply logical principles or fully resolve complex issues." 
+ }, + { + "type": "text", + "bbox": [ + 0.198, + 0.467, + 0.8, + 0.492 + ], + "angle": 0, + "content": "Score 4: The model frequently exhibits strong reasoning skills, effectively addressing complex questions with minor inconsistencies or errors." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.492, + 0.8, + 0.518 + ], + "angle": 0, + "content": "Score 5: The model consistently demonstrates advanced reasoning abilities, providing logically sound, coherent, and sophisticated responses to complex queries." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.53, + 0.285, + 0.542 + ], + "angle": 0, + "content": "Feedback:" + }, + { + "type": "image_caption", + "bbox": [ + 0.382, + 0.579, + 0.615, + 0.594 + ], + "angle": 0, + "content": "Figure 18: Prompt for Prometheus." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.615, + 0.8, + 0.667 + ], + "angle": 0, + "content": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. Respond with either [Correct] or [Incorrect]." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.679, + 0.338, + 0.69 + ], + "angle": 0, + "content": "Special considerations:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.691, + 0.8, + 0.729 + ], + "angle": 0, + "content": "1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect]." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.729, + 0.8, + 0.754 + ], + "angle": 0, + "content": "2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct]." 
+ }, + { + "type": "text", + "bbox": [ + 0.199, + 0.755, + 0.8, + 0.779 + ], + "angle": 0, + "content": "3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.779, + 0.8, + 0.805 + ], + "angle": 0, + "content": "4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]." + }, + { + "type": "list", + "bbox": [ + 0.199, + 0.691, + 0.8, + 0.805 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.816, + 0.364, + 0.83 + ], + "angle": 0, + "content": "Question: \"\"{question}\"\"" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.83, + 0.393, + 0.842 + ], + "angle": 0, + "content": "Output sentence: \"\"{output}\"" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.842, + 0.354, + 0.855 + ], + "angle": 0, + "content": "Correct answer: {answer}" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.856, + 0.27, + 0.868 + ], + "angle": 0, + "content": "Judgement:" + }, + { + "type": "image_caption", + "bbox": [ + 0.397, + 0.893, + 0.6, + 0.909 + ], + "angle": 0, + "content": "Figure 19: Prompt for xVerify." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.827, + 0.189 + ], + "angle": 0, + "content": "All models were executed on GPUs with identical configurations. Specifically, Prometheus-8x7B-v2.0, JudgeLM-33B-v1.0, CompassJudger-1-32B, xVerify-27B-I, and xVerify-32B-I were deployed on two GPUs for inference, while the remaining models were deployed on a single GPU. 
From Table 21, it is evident that all xVerify models exhibit an overall average runtime within 100 seconds, whereas the overall average runtime for the other judge models exceeds 100 seconds. Moreover, for each question category, the models with the shortest evaluation times are the xVerify models. Thus, the xVerify models demonstrably surpass the other judge models in terms of evaluation efficiency." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.195, + 0.828, + 0.295 + ], + "angle": 0, + "content": "Table 22 presents the evaluation costs incurred when employing GPT-4o as the judge, based on assessments of 200 randomly selected samples per question type, along with the overall expenditure. Apart from the prerequisite deployment overhead, the cost of invoking the xVerify models for evaluation is substantially lower than that of GPT-4o. Additionally, compared to GPT-4o, which relies on remote server deployment, the locally deployed xVerify models offer higher invocation efficiency. Taken together, these results underscore that the xVerify models outperform the other judge models in both usage cost and evaluation efficiency." + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.313, + 0.825, + 0.354 + ], + "angle": 0, + "content": "Table 21: Running Time Comparison of xVerify Models and Other Judge Models (200 Samples per Question Type). The best performance in each column is shown in **bold**, and the second-best performance is underlined." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.355, + 0.825, + 0.647 + ], + "angle": 0, + "content": "
Method TypeMethodMultiple Choice (s)Math (s)Short Answer (s)Classification (s)Avg (s)
Judge ModelPandaLM-7B-v1304.5076.2476.9765.79130.88
Auto-J-Bilingual-6B1,570.441,802.711,194.081,148.321,428.89
Auto-J-13B3,055.003,622.702,807.231,903.002,846.98
Prometheus-7B-v2.01,173.80947.71706.74696.34881.15
Prometheus-8x7B-v2.01,557.101,128.081,132.84750.511,142.13
JudgeLM-7B-v1.0551.88469.10394.57348.05440.90
JudgeLM-13B-v1.0777.73598.19564.25529.60617.44
JudgeLM-33B-v1.01,041.831,018.37789.80762.99903.25
CompassJudger-1-1.5B189.45244.08139.50110.95171.00
CompassJudger-1-7B163.96568.72450.2080.58315.87
CompassJudger-1-14B346.80571.66217.86196.18333.13
CompassJudger-1-32B147.53258.10133.59152.11172.83
xVerifyxVerify-0.5B-I38.9741.2539.1238.8739.55
xVerify-1B-I33.9136.6333.4433.4734.36
xVerify-1.5B-I43.0546.8742.1742.0843.54
xVerify-2B-I38.4473.1639.2937.3847.07
xVerify-3B-Ia38.5444.5437.1143.0240.80
xVerify-3B-Ib46.9353.58106.0647.8463.60
xVerify-7B-I68.2495.5050.6651.6766.52
xVerify-8B-I78.0661.5745.3446.8257.95
xVerify-9B-C131.0770.1651.6652.5776.37
xVerify-9B-I54.2069.9149.4151.0656.15
xVerify-14B-Ia59.18114.9155.5054.8071.10
xVerify-14B-Ib61.17145.19116.4357.5595.09
xVerify-27B-I85.2889.4158.9961.0073.67
xVerify-32B-I131.0598.9964.7467.4590.56
" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.665, + 0.779, + 0.681 + ], + "angle": 0, + "content": "Table 22: Total costs (in USD) of GPT-4o as Judge (200 Samples per Question Type)." + }, + { + "type": "table", + "bbox": [ + 0.208, + 0.687, + 0.791, + 0.733 + ], + "angle": 0, + "content": "
MethodMultiple Choice ($)Math ($)Short Answer ($)Classification ($)Total ($)
GPT-4o as Judge0.310.660.240.271.48
GPT-4o as Judge (CoT)0.551.000.420.482.45
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "32" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10481/29785fca-1f46-4ab1-92e1-b0b4c9aee15b_origin.pdf b/data/2025/2504_10xxx/2504.10481/29785fca-1f46-4ab1-92e1-b0b4c9aee15b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2cbde60b6ef072f2a22aa99e795f9c4eceff5ab3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/29785fca-1f46-4ab1-92e1-b0b4c9aee15b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f27a33b6a08708914448793ce0b5f15ea841f591a5e9aa6f3da11ff06bd7f2bd +size 922930 diff --git a/data/2025/2504_10xxx/2504.10481/full.md b/data/2025/2504_10xxx/2504.10481/full.md new file mode 100644 index 0000000000000000000000000000000000000000..0febc5225ec70bf286649ca0038b11cb61dcd85c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/full.md @@ -0,0 +1,812 @@ +Ding Chen $^{1*}$ Qingchen Yu $^{2*}$ Pengyuan Wang $^{2*}$ Wentao Zhang $^{3\dagger}$ + +Bo Tang² Feiyu Xiong² Xinchi Li¹ Minchuan Yang¹ Zhiyu Li²† + +1 Research Institute of China Telecom, Beijing, China + +2 MemTensor (Shanghai) Technology Co., Ltd. + +3 Center for Data Science, Peking University wentao.zhang@pku.edu.cn, lizy@iaar.ac.cn + +# Abstract + +With the release of the o1 model by OpenAI, reasoning models adopting slow thinking strategies have gradually emerged. As the responses generated by such models often include complex reasoning, intermediate steps, and self-reflection, existing evaluation methods are often inadequate. They struggle to determine whether the LLM output is truly equivalent to the reference answer, and also have difficulty identifying and extracting the final answer from long, complex responses. To address this issue, we propose xVerify, an efficient answer verifier for reasoning model evaluations. 
xVerify demonstrates strong capability in equivalence judgment, enabling it to effectively determine whether the answers produced by reasoning models are equivalent to reference answers across various types of objective questions. To train and evaluate xVerify, we construct the VAR dataset by collecting question-answer pairs generated by multiple LLMs across various datasets, leveraging multiple reasoning models and challenging evaluation sets designed specifically for reasoning model assessment. A multi-round annotation process is employed to ensure label accuracy. Based on the VAR dataset, we train multiple xVerify models of different scales. In evaluation experiments conducted on both the test set and generalization set, all xVerify models achieve overall F1 scores and accuracy exceeding $95\%$ . Notably, the smallest variant, xVerify-0.5B-I, outperforms all evaluation methods except GPT-4o, while xVerify-3B-Ib surpasses GPT-4o in overall performance. These results validate the effectiveness and generalizability of xVerify. All resources for xVerify are available at https://github.com/IAAR-Shanghai/xVerify. + +# 1 Introduction + +With the emergence of chain of thought (CoT) prompting [35], researchers began to explicitly encourage LLMs to generate intermediate reasoning steps, thereby enhancing their ability to handle complex tasks. Following this, OpenAI introduced the o1 model [15], which proposed the concepts of slow thinking and scaling at test time. Specifically, the model is trained to output a detailed reasoning process before generating a final answer, significantly improving its performance on complex tasks. Inspired by this paradigm, a variety of reasoning models have emerged, such as DeepSeek-R1 [3] trained with GRPO, OpenAI's o3-mini [26], and QwQ-32B [31]. However, the rise of reasoning models poses substantial challenges for evaluation. 
Since the outputs of these models often contain + +lengthy reasoning processes—potentially including redundant information, intermediate results, and even self-contradictions—it becomes significantly more difficult for evaluation tools to extract the final answer from such responses [2]. + +Developing evaluation methods tailored for LLM responses involving complex reasoning has become a key research focus. LLM reasoning is typically categorized into commonsense, logical, multihop, and mathematical reasoning [8]. Existing evaluation methods fall into automatic and human evaluation [2], with automatic evaluation gaining prominence due to its scalability and lower cost. The main automatic approaches for evaluating reasoning models include rule-based evaluation frameworks [13, 5, 27, 9, 25] and LLM-based judgment methods [20, 7, 18]. However, both approaches face limitations in reasoning model evaluation. Rule-based frameworks often struggle to extract final answers from lengthy reasoning traces, rely on strict formatting (e.g., syntactically correct LaTeX), and typically ignore the reasoning process itself—an oversimplification challenged by many researchers [36, 33, 14, 32]. Judge models are usually not optimized for reasoning evaluation and mainly produce qualitative scores or comments [7], making them more suitable for subjective questions. Objective tasks, in contrast, require accurate binary classification. Currently, effective automatic methods specifically designed for evaluating reasoning on objective questions remain lacking. + +To address these challenges, we introduce xVerify, an efficient LLM-answer verifier tailored for evaluating LLM responses to objective questions. xVerify processes the full LLM output, enabling it to accurately identify final answers from complex reasoning traces. 
It also supports robust equivalence checking, including symbol conversion (e.g., 'alpha' $\rightarrow$ 'α'), mathematical expression matching, and semantic alignment in natural language. Moreover, it is tolerant of formatting errors such as malformed LaTeX, making it applicable to a wide range of tasks, including math problems, multiple-choice, short-answer, and classification questions. To train and evaluate xVerify, we construct the Verify Answer for Reasoning (VAR) dataset, which includes responses from 19 LLMs across 24 reasoning benchmarks. All labels are verified through multi-round GPT-4o and human review. The dataset covers advanced reasoning models and benchmarks like GPQA, LiveMathBench, and AIME 2024. We fine-tune xVerify on a variety of base models (e.g., Qwen2.5, LLaMA, Gemma 2) and scales (0.5B-32B). Remarkably, even the smallest variant (xVerify-0.5B-I) surpasses existing evaluation methods—including 32B-sized models—on all metrics, while larger variants achieve F1 and accuracy over $95\%$ on both test and generalization sets. + +The main contributions of this paper are summarized as follows: + +- We construct the VAR dataset, which contains answer samples from 19 LLMs across 24 evaluation benchmarks. The dataset is annotated via multiple rounds of GPT-4o and human review, and is designed for training and evaluating judge models for reasoning tasks. +- We propose xVerify, an efficient answer verifier for reasoning model evaluations, and release multiple fine-tuned versions of xVerify. The checkpoints are publicly available2. +- We conduct comprehensive comparative evaluations against multiple existing evaluation frameworks and judge models on both test and generalization datasets, thoroughly validating the effectiveness and applicability of xVerify. + +# 2 Related Work + +Evaluation methods have always been a crucial component in the development of LLM [2]. 
However, the open-ended nature of LLM outputs makes it difficult to apply standardized metrics, limiting the effectiveness of traditional evaluation methods [20]. The rise of reasoning models [26, 3, 31], which often generate lengthy and complex reasoning, further complicates evaluation. For objective tasks, the main challenge is to accurately extract the final answer from the LLM's semi-structured output and compare it with the reference answer. Existing approaches are typically divided into human evaluation and automatic evaluation. While human evaluation offers flexibility, automatic methods are more cost-efficient and consistent [2]. Current automatic methods mainly include rule-based evaluation frameworks and LLM-based judgment methods.

Rule-based methods are widely used in automatic evaluation frameworks such as LM Eval Harness [5], OpenCompass [27], UltraEval [9], and OpenAI Evals [25]. Tools like Math-Verify [13] also follow this approach, extracting final answers using regular expressions (RegEx) and comparing them with reference answers. However, LLM outputs often contain final answers in varied surface forms—e.g., "alpha" vs. "α", "A" vs. "a", or "1000" vs. "10³"—which can be semantically equivalent but textually different. While some tools support limited transformations, they typically handle only LaTeX expressions or simple string patterns, and struggle with basic semantic equivalence like "one hundred" vs. "100". For reasoning models, the output is usually lengthy and involves complex reasoning steps with intermediate results. This makes it difficult for regular expressions to accurately identify the final answer, causing rule-based approaches to frequently fail in such contexts. Moreover, prior work has shown that LLMs may revise or overturn their initial predictions during extended reasoning processes, exhibiting a kind of self-reflection [32].
At the same time, rule-based methods typically ignore the reasoning process and only evaluate the final answer, which has drawn criticism from many researchers—especially in the context of reasoning models [36, 33, 14]. Thus, rule-based evaluations have limited applicability in reasoning scenarios. + +LLM-based judgment methods use fine-tuned LLMs to evaluate the quality of other LLMs' responses. Compared to traditional evaluation methods, they offer greater task adaptability, generate interpretable results, reduce evaluation costs, and can be applied across the LLM lifecycle [20, 7, 18]. For objective questions, these judge models can extract final answers from responses with intermediate reasoning or self-reflection. In recent years, many LLM-based judge models have emerged, including JudgeLM [39], PandaLM [34], Auto-J [21], Prometheus 2 [17], CompassJudger [1], CritiqueLLM [16], and Themis [12]. Judge models typically support pointwise, pairwise, and listwise evaluations [20], and some also serve as reward models in reinforcement learning. However, most are designed to assign scores to LLM outputs, making them more suitable for subjective evaluations like helpfulness, reliability, or relevance. For objective questions that require binary decisions ("correct" or "incorrect"), these models are less effective. Although scores can be binarized using thresholds, this approach is unreliable, as the models are not explicitly trained for such tasks. Moreover, the current LLM-based critic models and PRMs (Process Reward Models) exhibit subpar performance when detecting errors in long chain-of-thought responses generated by reasoning models [10]. Thus, while judge model holds promise for evaluating reasoning models, they require targeted training. + +In summary, automatic evaluation on objective tasks remains underdeveloped. Rule-based and LLM-based methods each have clear limitations, while human annotation is costly and hard to scale. 
To address these challenges, we propose xVerify, a robust and targeted judge model specifically designed for objective evaluation of LLMs.

# 3 Problem Definition

To evaluate the correctness of LLM responses to objective questions, the key is to extract the final answer from the response and compare it with the reference answer. We formally define this evaluation task as follows:

We formalize this task as a 4-tuple $(\mathrm{Q},\mathrm{R},\mathrm{A}_{\mathrm{ref}},\mathrm{E})$ , where $\mathrm{Q} = \{q_1,q_2,\dots,q_n\}$ is the set of questions, $\mathrm{R} = \{r_1,r_2,\dots,r_n\mid r_i = \mathcal{W}(q_i)\}$ is the set of responses generated by an LLM $\mathcal{W}$ , $\mathrm{A}_{\mathrm{ref}} = \{a_{ref}^{1},\dots,a_{ref}^{n}\}$ is the set of reference answers, and $\mathrm{E}:\mathrm{Q}\times \mathrm{R}\times \mathrm{A}_{\mathrm{ref}}\to \{0,1\}$ is the evaluation function that returns 1 if the response is correct and 0 otherwise.

For the stage of extracting the final answer, given a response $r$ to question $q$ , which may include intermediate reasoning and multiple candidate answers, we denote the extracted candidates as $\mathrm{A}(r)$ . To identify the final answer, we define a scoring function $\mathrm{S} : \mathrm{A}(r) \times \mathrm{Q} \to \mathbb{R}$ that measures the relevance or suitability of each candidate $a \in \mathrm{A}(r)$ to $q$ , and select the final answer using the extraction function:

$$
\varepsilon (q, r) = \arg \max _ {a \in \mathrm {A} (r)} \mathrm {S} (a, q). \tag {1}
$$

For the equivalence comparison stage, we define an equivalence function $\psi : \mathrm{A}_{\mathrm{ref}} \times \mathrm{A}_{\mathrm{final}} \to \{0,1\}$ , where $\psi$ returns 1 if the predicted answer is equivalent to the reference, and 0 otherwise.
Since answers may appear in different forms, $\psi$ integrates results from the following three sub-functions:

For mathematical expressions, we define a composite normalization function $\Phi_{\mathrm{norm}}^{\mathrm{math}} = \phi_{\mathrm{err}} \circ \phi_{\mathrm{syn}} \circ \phi_{\mathrm{alg}} \circ \phi_{\mathrm{dim}}$ , where $\phi_{\mathrm{err}}$ repairs minor syntax errors, $\phi_{\mathrm{syn}}$ unifies syntactic structures, $\phi_{\mathrm{alg}}$ performs algebraic simplification, and $\phi_{\mathrm{dim}}$ ensures consistency in physical units. By transforming expressions into a canonical form, $\Phi_{\mathrm{norm}}^{\mathrm{math}}$ enables reliable equivalence comparison:

$$
\psi_{\mathrm{math}}\left(a_{ref}^{math}, a_{final}^{math}\right) = \left\{ \begin{array}{ll} 1 & \text{if } \Phi_{\mathrm{norm}}^{math}\left(a_{ref}^{math}\right) = \Phi_{\mathrm{norm}}^{math}\left(a_{final}^{math}\right), \\ 0 & \text{otherwise} \end{array} \right. \tag{2}
$$

For natural language answers, we define a comparison function $\psi_{\mathrm{nl}}: \mathrm{A}_{\mathrm{ref}}^{\mathrm{nl}} \times \mathrm{A}_{\mathrm{final}}^{\mathrm{nl}} \to \{0,1\}$ to assess semantic equivalence. Specifically, we introduce a semantic alignment function $\phi_{\mathrm{align}}^{nl}$ to measure the similarity between two textual answers. The equivalence decision is made by comparing the alignment score with a predefined threshold $\tau$ :

$$
\psi_{\mathrm{nl}}\left(a_{ref}^{nl}, a_{final}^{nl}\right) = \left\{ \begin{array}{ll} 1 & \text{if } \phi_{\mathrm{align}}^{nl}\left(a_{ref}^{nl}, a_{final}^{nl}\right) \geq \tau, \\ 0 & \text{otherwise} \end{array} \right.
\tag{3}
$$

For symbolic representations, we define a composite normalization function $\Phi_{\mathrm{norm}}^{\mathrm{sym}} = \phi_{\mathrm{uni}} \circ \phi_{\mathrm{font}} \circ \phi_{\mathrm{dom}}$ , which unifies symbols by applying $\phi_{\mathrm{uni}}$ for Unicode normalization, $\phi_{\mathrm{font}}$ for aligning font styles, and $\phi_{\mathrm{dom}}$ for domain-specific mappings. This produces a standardized form for character-level comparison, and $\psi_{\mathrm{sym}}$ is defined as:

$$
\psi_{\mathrm{sym}}\left(a_{ref}^{sym}, a_{final}^{sym}\right) = \left\{ \begin{array}{ll} 1 & \text{if } \Phi_{\mathrm{norm}}^{sym}\left(a_{ref}^{sym}\right) = \Phi_{\mathrm{norm}}^{sym}\left(a_{final}^{sym}\right), \\ 0 & \text{otherwise} \end{array} \right. \tag{4}
$$

Based on the above components, we define a unified equivalence function $\psi$ to determine whether the final answer $a_{final}$ matches the reference answer $a_{ref}$ across different modalities. The definition is:

$$
\psi\left(a_{final}, a_{ref}\right) = \left\{ \begin{array}{ll} 1, & \text{if } \psi_{\mathrm{math}}\left(a_{final}^{math}, a_{ref}^{math}\right) = 1 \\ & \quad \wedge \psi_{\mathrm{nl}}\left(a_{final}^{nl}, a_{ref}^{nl}\right) = 1 \\ & \quad \wedge \psi_{\mathrm{sym}}\left(a_{final}^{sym}, a_{ref}^{sym}\right) = 1; \\ 0, & \text{otherwise} \end{array} \right. \tag{5}
$$

Here, $a_{final}^{math}, a_{final}^{nl}$ , and $a_{final}^{sym}$ represent the mathematical, natural language, and symbolic parts of the final answer, respectively, and similarly for $a_{ref}$ . This allows for equivalence checking in both unimodal and multimodal settings.
+ +To summarize, the overall evaluation function $\mathrm{E}$ is defined as: + +$$ +\mathrm {E} (q, r, a _ {r e f}) = \psi (\varepsilon (q, r), a _ {r e f}) \tag {6} +$$ + +where $q$ is the objective question, $r$ is the response generated by the LLM, and $a_{ref}$ is the corresponding reference answer. + +# 4 Methodology + +The xVerify training and evaluation pipeline includes three main stages: collecting LLM responses, VAR dataset construction, and xVerify judge pipeline (see Figure 1). We first gather question-response pairs from various LLMs across four types of objective questions, including complex, reasoning-intensive examples. To ensure accurate labels, we employ multiple rounds of annotation and rechecking using both GPT-4o and human annotators. We also apply data augmentation to increase the dataset's diversity and complexity. Finally, we train xVerify models of different sizes on the VAR dataset to evaluate long, multi-step answers—cases that are often difficult for existing evaluation methods. Section 4.1 details the dataset construction, and Section 4.2 describes the training process. + +![](images/6d439d7f1c4119a53063123e4a1a272a2937ccfe8b265eebde7d815d650c02cb.jpg) +Figure 1: Framework of xVerify: (1) Collecting LLM Responses: aggregate responses from multiple LLMs across datasets covering four question types. (2) VAR Dataset Construction: employ GPT-4o and human annotators for labeling and rechecking, and use data augmentation to refine the dataset. (3) xVerify Judge Pipeline: accurately evaluate multi-component answers from reasoning models on challenging questions. + +# 4.1 VAR Dataset + +xVerify is designed to assess the correctness of reasoning models' responses on objective questions. However, current judge models are mostly trained on tasks such as scoring or reviewing, and reasoning models with lengthy responses have only recently emerged. As a result, there is currently no suitable dataset for training xVerify. 
To better train and evaluate xVerify, we constructed a dedicated dataset named Verify Answer for Reasoning (VAR). Examples from the VAR dataset are provided in Appendix B.3. + +# 4.1.1 LLM Response Generation + +To ensure the diversity and coverage of the dataset, we selected 19 mainstream LLMs and 24 frequently used multilingual datasets to generate and collect responses. To better simulate the answering patterns of reasoning models in common evaluation scenarios, the chosen LLMs include recently released models such as the DeepSeek-R1-Distill series [3] and QwQ-32B [31]. Most of the other LLMs also support context lengths exceeding $32k$ tokens, enabling them to produce answers with extended reasoning chains. The selected datasets include high-difficulty benchmarks commonly used for evaluating reasoning models, such as GPQA [28], AIME 2024 [24], MATH [11], and LiveCodeBench [23], which typically require multi-step reasoning and computation to solve. During data generation, we also retained some extremely long responses, such as those exceeding 6k characters in length. Detailed information on all LLMs and datasets is provided in Appendix A. + +To train and evaluate xVerify more effectively, we grouped the 24 datasets into four types based on question and answer formats: multiple choice, math, short answer, and classification. Multiple choice questions offer several labeled options; math includes questions where answers are mathematical expressions (e.g., numbers, equations), including mathematics and physics problems; short answer questions expect brief natural language responses like names or dates, with no strict format constraints; classification tasks involve selecting the correct label, such as for sentiment or topic classification. + +To reflect realistic evaluation settings and generate a diverse set of Q&A samples, we designed multiple prompt templates for guiding the LLMs in response generation. 
The prompt configurations vary along several dimensions: 0-shot vs. 5-shot, with or without CoT, and with or without answer format restrictions (restrict), resulting in eight distinct prompt types. Details of all prompt templates are provided in Appendix D.1. + +In total, we generated 191,600 Q&A samples using the 19 LLMs and 24 evaluation sets, providing a rich and diverse sample pool for constructing the dataset. + +# 4.1.2 Dataset Partitioning + +Based on the previously collected sample pool, we constructed the training, test, and generalization sets through filtering and preprocessing. + +The training and test sets are used to train and evaluate the xVerify model. Both are sampled from the same pool, sharing similar distributions. Specifically, they include samples generated by 15 LLMs across 17 evaluation sets, covering the four previously mentioned question types. The training set contains 36,941 samples, and the test set includes 5,194 samples. + +The generalization set complements the test set by evaluating xVerify's ability to handle more diverse and challenging distributions, reflecting real-world scenarios. It consists of 5,366 samples from 7 evaluation sets not used in the training or test sets, while still spanning all four question types. These samples are generated by 19 LLMs, including 4 models not seen in training or testing, such as the reasoning model QwQ-32B, resulting in greater diversity and distribution shift. + +Section 4.1.4 introduces our data augmentation strategy, which adds more challenging samples to all three sets. Detailed dataset statistics are provided in Appendix B.1. + +# 4.1.3 Data Annotations + +To ensure the accuracy of xVerify's training and evaluation, we conducted multiple rounds of automatic and manual annotation across the three datasets. 
Specifically, we used GPT-4o to perform two rounds of annotation for all samples in the datasets, utilizing two distinct prompt templates (details provided in Appendix D.2) to improve annotation confidence [33, 22]. Given the large size of the training set, we only applied manual annotation to the more challenging math problems and to samples where the two rounds of GPT-4o annotations disagreed. In contrast, for the test and generalization sets, we manually annotated all samples, resulting in a three-round annotation process to maximize label reliability. Details of the manual annotation process are provided in Appendix B.2. + +# 4.1.4 Data Augmentation + +![](images/c7b64161e6bad9b6a55e1d24b6081bbc80d1590623a46a0a5887f61e520e322d.jpg) +Figure 2: Data Augmentation Pipelines: (1) transformation of multiple-choice options through numbering conversion and noise injection, (2) diversification of mathematical answers via equivalent expression generation, and (3) final answer sentence transformation using prompt rephrasing, symbol wrapping, and gap token insertion. + +To further enhance the diversity and robustness of the dataset, we designed a series of data augmentation strategies (illustrated in Figure 2) to better simulate real-world evaluation settings and improve the model's tolerance to varied answer formats. + +For multiple-choice questions, we applied two types of augmentations: option index transformation and noise injection. The former converts alphabetical labels to Arabic or Roman numerals, while the latter randomly adds or removes irrelevant distractor options without changing the original question intent, thereby increasing structural complexity. + +For math problems, we used two approaches: augmentation based on reference answers and LLM responses. In the first approach, we generated 3-5 mathematically equivalent expressions of each reference answer through symbolic and formal transformations, then created new samples accordingly. 
In the second, we applied the same transformation logic to the final answers in LLM responses, enriching the dataset with varied mathematical formats and helping the model learn equivalence across symbolic expressions. + +We also augmented the final answer statements. Specifically, we extracted answer-bearing sentences from responses generated using restrict prompts, and applied over 1,000 transformation patterns. These included: 20 variations of prompt rephrasing (e.g., "The answer is B" $\rightarrow$ "The most appropriate answer is B"), 18 symbolic wrappers (e.g., wrapping B as $B$ ), and 5 forms of delimiter insertions (e.g., adding a colon or space before the answer). This improved diversity in answer formats and reduced overfitting to specific templates. + +Together, these strategies expanded the expressive space of the dataset while preserving semantic consistency, offering richer and more challenging training signals for xVerify. After augmentation, the sizes of the training, test, and generalization sets increased to 43,204, 6,122, and 6,468 samples respectively. Full dataset details are provided in Appendix B.1. The augmentation of math problems primarily relied on GPT-4o; prompt templates are listed in Appendix D.3. + +# 4.2 Model Training + +We trained 14 models with different parameter sizes and architectures using the training set from the VAR dataset. Specifically, we utilized the LLaMA-Factory framework [38] and QLoRA technique [4] for model training. Based on extensive experimentation, we set the number of epochs to 1 and selected a learning rate of 1e-4 as the optimal configuration, with other hyperparameters detailed in Appendix C.1. Many researchers have pointed out potential bias in using LLMs as judge models, where models from the same family tend to receive higher ratings [19]. To thoroughly evaluate the generalization capability of the xVerify method, we trained 14 models with varying parameter sizes and architectures. 
These models ranged from 0.5B to 32B parameters and included five different families, such as LLaMA 3 [6], Qwen2.5 [37], and Gemma 2 [30]. Details of the models used are provided in Appendix C.2. + +# 5 Experiments + +In this section, we will present the configuration, results, and detailed analysis of the xVerify model evaluation experiments. First, we will outline the experimental setup: + +- Datasets: The datasets used in the evaluation experiments are the test set and generalization set from the VAR dataset. The test set is used to evaluate the xVerify model's performance, while the generalization set supplements the test set by simulating real-world scenarios with a broader sample distribution to assess the model's generalization ability. +- Metrics: The evaluation mainly uses accuracy and F1 score on both the test and generalization sets. Accuracy shows the model's overall performance, while the F1 score combines precision and recall for a more complete perspective. +- Baselines: There are two types of baselines: evaluation frameworks and judge models. The evaluation frameworks include DeepSeek-Math [29], LM Eval Harness [5], Math-Verify [13], OpenAI Evalu [25], OpenCompass [27], and UltraEval [9]. The judge models include PandaLM [34], Auto-J [21], Prometheus 2 [17], JudgeLM [39], and CompassJudger [1]. In addition, GPT-4o is also used as a judge model with two strategies: one with CoT and one without. The prompts for the judge model and xVerify are provided in Appendix D.4 and Appendix D.5. + +Test Set Evaluation Results. We evaluated all evaluation frameworks, judge models, and the xVerify model on the VAR test set (see Table 1). Overall, the xVerify model outperforms all evaluation frameworks and judge models, including GPT-4o, with the best and second-best values in each column appearing for the xVerify model. + +Table 1: Evaluation Accuracy Results on the Test Set. "-" indicates that the evaluation method is not applicable to the problem type. 
The best performance in each column is shown in bold, and the second-best performance is underlined.
Method TypeMethodMultiple ChoiceMathShort AnswerClassificationOverall
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
Evaluation FrameworkDeepSeek Math Verify70.77%75.17%78.34%84.30%----74.90%52.52%
LM Eval Harness58.44%68.19%25.16%28.27%53.41%44.51%72.35%66.94%47.67%48.32%
Math-Verify5.88%53.76%82.55%86.70%42.27%71.91%0.00%29.66%45.64%65.91%
OpenAI Simple Evals23.61%28.02%66.79%76.88%42.23%55.32%73.29%67.87%51.17%58.10%
OpenCompass68.11%72.52%79.25%84.73%----74.18%79.64%
UltraEval17.34%18.04%8.88%56.89%----13.95%40.71%
Judge ModelPandaLM-7B-v14.26%8.12%16.78%14.46%23.47%17.72%25.32%16.79%16.40%13.72%
Auto-J-Bilingual-6B52.85%67.71%40.76%65.21%67.22%79.60%74.86%71.37%57.04%69.59%
Auto-J-13B40.00%63.20%26.32%60.62%64.41%78.22%86.04%82.60%53.38%68.13%
Prometheus-7B-v2.075.76%75.41%74.20%74.35%70.95%74.59%84.80%77.03%76.50%75.11%
Prometheus-8x7B-v2.071.26%68.61%71.99%66.92%76.24%77.70%83.27%77.65%74.57%71.12%
JudgeLM-7B-v1.056.53%42.57%46.09%34.58%60.33%50.56%83.89%73.22%59.02%45.90%
JudgeLM-13B-v1.056.81%48.89%58.39%59.46%77.32%79.52%95.63%93.82%68.57%65.83%
JudgeLM-33B-v1.042.86%43.24%44.82%46.03%57.86%62.23%73.42%67.56%52.00%51.75%
CompassJudger-1-1.5B49.95%35.54%61.66%48.78%57.36%46.93%82.51%70.96%61.94%48.35%
CompassJudger-1-7B70.05%62.78%66.62%58.86%67.47%65.08%92.99%89.50%72.72%65.96%
CompassJudger-1-14B58.94%44.62%55.09%40.76%59.66%52.90%90.87%86.61%63.22%51.37%
CompassJudger-1-32B95.09%95.37%84.11%84.30%94.95%96.11%98.45%97.84%91.67%91.69%
GPT-4o as Judge96.61%96.75%95.27%95.80%95.01%96.20%98.14%97.43%96.25%96.39%
GPT-4o as Judge (CoT)97.10%97.23%95.41%95.88%95.63%96.63%99.56%99.38%96.85%96.95%
xVerifyxVerify-0.5B-I97.78%97.90%93.74%94.64%96.72%97.49%99.71%99.59%96.69%96.85%
xVerify-3B-Ib97.31%97.41%95.65%96.18%96.38%97.23%99.78%99.69%97.17%97.27%
xVerify-7B-I97.75%97.84%95.94%96.44%96.51%97.32%99.78%99.69%97.41%97.50%
xVerify-9B-I97.43%97.53%95.75%96.27%96.06%96.97%99.78%99.69%97.19%97.29%
xVerify-14B-Ia97.49%97.59%95.73%96.22%95.41%96.46%99.63%99.49%97.06%97.16%
xVerify-32B-I97.81%97.90%95.88%96.31%96.18%97.06%99.71%99.59%97.32%97.40%
+ +Among the evaluation frameworks, the best performers were DeepSeek Math Verify and OpenCompass, but neither achieved an F1 score nor accuracy exceeding $80\%$ . Some evaluation frameworks were also not suitable for certain question types, which is an inherent limitation of rule-based methods—strong in specificity but limited in applicability. For instance, OpenCompass was completely unsuitable for short answer and classification questions. Additionally, the long reasoning processes generated by reasoning models made it difficult for evaluation frameworks to extract final answers, lowering their overall performance. + +Among judge models, GPT-4o and CompassJudger showed the best overall performance. The CompassJudger-1-32B model achieved F1 score and accuracy of $91.67\%$ and $91.69\%$ , respectively. However, the model performed poorly on math questions, with both F1 score and accuracy below $85\%$ , indicating that it handles simpler questions well but struggles with formula equivalence in math problems. Furthermore, only the 32B version of this judge model achieved over $90\%$ F1 score and accuracy, while smaller models performed below $80\%$ . Therefore, the performance of CompassJudger-1-32B is more a result of the base model's capabilities rather than the subsequent training. For example, the smallest xVerify-0.5B-I model outperforms CompassJudger-1-32B across the board, indicating that the VAR training set significantly improves model evaluation performance. GPT-4o's overall performance is very close to xVerify, but the improvement after using CoT is small, with token consumption nearly doubling. Specifically, GPT-4o as Judge evaluated the entire test set at a cost of $13.09, while GPT-4o as Judge (CoT) cost $20.15 (using the OpenAI API, charged by token count). 
+ +In contrast, even the smallest xVerify-0.5B-I model outperforms all methods except GPT-4o as Judge (CoT) in overall performance, and the xVerify-3B-Ib model surpasses all others in every evaluation metric. Moreover, for more difficult math questions, all xVerify models except xVerify-0.5B-I exceeded $95\%$ performance. We also found that the performance of the xVerify model improves as the parameter size increases, but slightly decreases after exceeding 7B parameters, likely due to overfitting on the VAR training set, which is sufficiently large for smaller models. + +Generalization Set Evaluation Results. To better assess the performance of xVerify on a broader sample distribution, we evaluated all methods on the VAR generalization set, as shown in Table 2. On the generalization set, the xVerify model showed a slight decrease in overall performance. However, the drop in both F1 score and accuracy was less than $1.5\%$ , while other methods showed mixed results. Overall, the xVerify model still outperformed all other methods, indicating that although + +overfitting exists in xVerify, it is limited and the model maintains strong generalization ability on samples outside the training set distribution. + +Table 2: Evaluation Accuracy Results on the Generalization Set. "--" indicates that the evaluation method is not applicable to the problem type. The best performance in each column will be shown in bold, and the second-best performance will be underlined. + +
Method TypeMethodMultiple ChoiceMathShort AnswerClassificationOverall
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
Evaluation FrameworkDeepSeek Math Verify72.90%73.39%11.69%79.83%----60.57%44.42%
LM Eval Harness61.60%65.37%7.03%18.48%58.22%45.09%92.06%88.21%55.81%51.30%
Math-Verify5.19%45.10%64.18%87.68%9.12%52.75%0.00%24.59%16.10%55.53%
OpenAI Simple Evals28.72%29.23%24.31%78.90%58.33%59.58%94.39%91.62%57.99%63.36%
OpenCompass71.64%71.44%47.22%84.39%----65.74%78.18%
UltraEval16.29%15.31%13.55%78.39%----15.71%48.13%
Judge ModelPandaLM-7B-v14.28%7.85%9.91%15.97%45.81%31.43%36.23%25.99%23.74%19.14%
Auto-J-Bilingual-6B52.07%60.75%10.56%74.79%85.16%86.76%84.90%79.91%67.20%74.57%
Auto-J-13B34.87%52.78%9.86%76.54%85.12%86.97%77.67%71.99%60.43%71.35%
Prometheus-7B-v2.076.67%73.66%49.08%71.46%81.52%81.32%79.59%71.92%73.85%74.35%
Prometheus-8x7B-v2.074.13%68.60%49.48%60.27%87.15%86.13%84.70%77.19%74.51%71.69%
JudgeLM-7B-v1.060.22%45.71%12.71%15.40%72.15%62.51%86.11%76.18%59.11%46.38%
JudgeLM-13B-v1.065.39%57.80%21.61%44.87%86.11%84.53%91.78%86.89%69.18%65.63%
JudgeLM-33B-v1.046.99%45.10%20.31%39.99%71.34%66.69%41.92%33.36%46.06%46.01%
CompassJudger-1-1.5B55.75%40.87%34.53%33.62%63.93%51.57%84.49%73.93%60.01%47.65%
CompassJudger-1-7B74.31%65.20%38.27%39.89%88.99%88.15%93.29%89.29%73.47%67.47%
CompassJudger-1-14B63.65%49.50%27.63%21.20%73.61%66.48%88.97%81.92%63.10%51.21%
CompassJudger-1-32B92.93%92.32%72.05%84.91%96.81%96.86%98.05%97.05%91.90%92.04%
GPT-4o as Judge95.86%95.38%87.91%94.76%97.46%97.49%98.67%97.98%96.03%96.18%
GPT-4o as Judge (CoT)95.44%94.88%88.34%94.71%97.39%97.42%98.36%97.52%95.79%95.92%
xVerifyxVerify-0.5B-I96.49%96.10%80.00%91.94%96.95%97.00%99.03%98.53%95.29%95.53%
xVerify-3B-Ib96.21%95.71%86.20%94.15%97.60%97.63%99.03%98.53%96.08%96.23%
xVerify-7B-I96.16%95.66%87.86%94.87%97.45%97.49%98.93%98.37%96.22%96.37%
xVerify-9B-I96.06%95.55%87.47%94.76%97.53%97.56%99.13%98.68%96.23%96.38%
xVerify-14B-Ia96.11%95.60%90.20%95.74%97.32%97.35%99.13%98.68%96.53%96.65%
xVerify-32B-I96.22%95.71%90.09%95.59%97.32%97.35%99.03%98.53%96.50%96.60%
+ +Specifically, the overall F1 score and accuracy of all evaluation frameworks remained below $80\%$ with only OpenCompass achieving an overall accuracy above $70\%$ . This indicates that rule-based evaluation frameworks have significant limitations in generalization performance, struggling to effectively handle the diverse answers and evaluation sets from LLMs. + +Among the judge models, the best-performing ones remained GPT-4o and CompassJudger. However, all judge models except GPT-4o had an F1 score below $75\%$ on math questions, with most models scoring below $50\%$ , indicating that judge models almost entirely fail in evaluating more diverse and complex math problems. GPT-4o as Judge and GPT-4o as Judge (CoT) also failed to achieve an F1 score above $90\%$ on math problems, suggesting that the math samples in the generalization set indeed present challenges for evaluation methods. Furthermore, GPT-4o's performance did not improve after using CoT; instead, it showed a slight decline. This suggests that in broader scenarios, CoT-based prompt engineering methods do not effectively improve GPT-4o's performance as a judge model, and model fine-tuning may be a better option. + +In contrast, the xVerify-0.5B-I model outperformed all evaluation methods except GPT-4o, and the xVerify-3B-Ib model outperformed both CoT-based GPT-4o methods. For more difficult math problems, the F1 score and accuracy of the xVerify-14B-Ia and xVerify-32B-I models exceeded $90\%$ . Additionally, we observed that as the parameter size of the xVerify model increased, the performance drop on the generalization set decreased. For example, the accuracy drop for xVerify-0.5B-I was $1.33\%$ , $0.91\%$ for xVerify-9B-I, and $0.80\%$ for xVerify-32B-I, suggesting that larger xVerify models exhibit stronger generalization performance. 
+ +Furthermore, we comprehensively evaluated the performance of 14 xVerify models on both the test and generalization sets, and tested the computational efficiency of all xVerify and judge models, along with the evaluation cost of GPT-4o as a judge model. The results showed that xVerify models outperform other judge models in both usage cost and evaluation efficiency. Full experimental results can be found in Appendix E. + +# 6 Conclusion + +In this paper, we propose an efficient answer verifier for reasoning model evaluations, named xVerify, which can effectively assess the correctness of long reasoning responses generated by + +reasoning models on various difficult objective questions. To train and evaluate the xVerify model, we constructed the VAR dataset based on several popular LLMs and evaluation sets. This dataset primarily collects long reasoning responses generated by reasoning models on challenging questions, and multiple rounds of labeling and verification were conducted using GPT-4o and human annotators. Ultimately, we trained multiple xVerify models of varying specifications based on the VAR dataset and performed comparative evaluations with several evaluation frameworks and judge models on both the test and generalization sets. The experimental results show that even the smallest xVerify-0.5B-I model outperforms all methods except GPT-4o, and larger xVerify models surpass all other methods, demonstrating the effectiveness and generalization ability of xVerify. + +# References + +[1] Maosong Cao, Alexander Lam, Haodong Duan, Hongwei Liu, Songyang Zhang, and Kai Chen. Compassjudger-1: All-in-one judge model helps model evaluation and evolution. arXiv preprint arXiv:2410.16256, 2024. +[2] Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, et al. A survey on evaluation of large language models. ACM transactions on intelligent systems and technology, 15(3):1-45, 2024. 
+[3] DeepSeek-AI, Daya Guo, Dejian Yang, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. +[4] Tim Dettmers, Artidoro Pagnoni, Ari Holtzman, and Luke Zettlemoyer. Qlora: Efficient finetuning of quantized llms. Advances in neural information processing systems, 36:10088-10115, 2023. +[5] Leo Gao, Jonathan Tow, Baber Abbasi, Stella Biderman, Sid Black, Anthony DiPofi, Charles Foster, Laurence Golding, Jeffrey Hsu, Alain Le Noac'h, Haonan Li, Kyle McDonell, Niklas Muennighoff, Chris Ociepa, Jason Phang, Laria Reynolds, Hailey Schoelkopf, Aviya Skowron, Lintang Sutawika, Eric Tang, Anish Thite, Ben Wang, Kevin Wang, and Andy Zou. A framework for few-shot language model evaluation, September 2021. +[6] Aaron Grattafiori, Abhimanyu Dubey, Abhinav Jauhri, et al. The llama 3 herd of models, 2024. +[7] Jiawei Gu, Xuhui Jiang, Zhichao Shi, Hexiang Tan, Xuehao Zhai, Chengjin Xu, Wei Li, Yinghan Shen, Shengjie Ma, Honghao Liu, Saizhuo Wang, Kun Zhang, Yuzhuo Wang, Wen Gao, Lionel Ni, and Jian Guo. A survey on llm-as-a-judge, 2025. +[8] Zishan Guo, Renren Jin, Chuang Liu, Yufei Huang, Dan Shi, Supryadi, Linhao Yu, Yan Liu, Jiaxuan Li, Bojian Xiong, and Deyi Xiong. Evaluating large language models: A comprehensive survey, 2023. +[9] Chaoqun He, Renjie Luo, Shengding Hu, Yuanqian Zhao, Jie Zhou, Hanghao Wu, Jiajie Zhang, Xu Han, Zhiyuan Liu, and Maosong Sun. Ultraeval: A lightweight platform for flexible and comprehensive evaluation for llms. arXiv preprint arXiv:2404.07584, 2024. +[10] Yancheng He, Shilong Li, Jiaheng Liu, Weixun Wang, Xingyuan Bu, Ge Zhang, Zhongyuan Peng, Zhaoxiang Zhang, Zhicheng Zheng, Wenbo Su, and Bo Zheng. Can large language models detect errors in long chain-of-thought reasoning?, 2025. +[11] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. In J. Vanschoren and S. 
Yeung, editors, Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, volume 1, 2021. +[12] Xinyu Hu, Li Lin, Mingqi Gao, Xunjian Yin, and Xiaojun Wan. Themis: A reference-free nlg evaluation language model with flexibility and interpretability. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 15924-15951, 2024. +[13] Greg Gandenberger Hynek Kydlíček. GitHub - huggingface/Math-Verify: A robust mathematical expression evaluation system designed for assessing Large Language Model outputs in mathematical tasks., 2024. + +[14] Alon Jacovi, Yonatan Bitton, Bernd Bohnet, Jonathan Herzig, Or Honovich, Michael Tseng, Michael Collins, Roee Aharoni, and Mor Geva. A chain-of-thought is as strong as its weakest link: A benchmark for verifiers of reasoning chains. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 4615–4634, Bangkok, Thailand, August 2024. Association for Computational Linguistics. +[15] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024. +[16] Pei Ke, Bosi Wen, Zhuoer Feng, Xiao Liu, Xuanyu Lei, Jiale Cheng, Shengyuan Wang, Aohan Zeng, Yuxiao Dong, Hongning Wang, Jie Tang, and Minlie Huang. Critiquellm: Towards an informative critique generation model for evaluation of large language model generation. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics, 2024. +[17] Seungone Kim, Juyoung Suk, Shayne Longpre, Bill Yuchen Lin, Jamin Shin, Sean Welleck, Graham Neubig, Moontae Lee, Kyungjae Lee, and Minjoon Seo. *Prometheus* 2: An open source language model specialized in evaluating other language models. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, *Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing*, pages 4334–4353, Miami, Florida, USA, November 2024. Association for Computational Linguistics. +[18] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, Kai Shu, Lu Cheng, and Huan Liu. From generation to judgment: Opportunities and challenges of llm-as-a-judge, 2025. +[19] Dawei Li, Renliang Sun, Yue Huang, Ming Zhong, Bohan Jiang, Jiawei Han, Xiangliang Zhang, Wei Wang, and Huan Liu. Preference leakage: A contamination problem in llm-as-a-judge, 2025. +[20] Haitao Li, Qian Dong, Junjie Chen, Huixue Su, Yujia Zhou, Qingyao Ai, Ziyi Ye, and Yiqun Liu. Llms-as-judges: A comprehensive survey on llm-based evaluation methods, 2024. +[21] Junlong Li, Shichao Sun, Weizhe Yuan, Run-Ze Fan, hai zhao, and Pengfei Liu. Generative judge for evaluating alignment. In The Twelfth International Conference on Learning Representations, 2024. +[22] Xun Liang, Shichao Song, Zifan Zheng, Hanyu Wang, Qingchen Yu, Xunkai Li, Rong-Hua Li, Yi Wang, Zhonghao Wang, Feiyu Xiong, and Zhiyu Li. Internal consistency and self-feedback in large language models: A survey, 2024. +[23] Junnan Liu, Hongwei Liu, Linchen Xiao, Ziyi Wang, Kuikun Liu, Songyang Gao, Wenwei Zhang, Songyang Zhang, and Kai Chen. Are your llms capable of stable reasoning? arXiv preprint arXiv:2412.13147, 2024. +[24] MAA. American invitational mathematics examination - aide. American Invitational Mathematics Examination - AIME 2024, February 2024. +[25] OpenAI. GitHub - openai/evals: Evals is a framework for evaluating LLMs and LLM systems, and an open-source registry of benchmarks., 2024. +[26] OpenAI. Openai o3-mini, 2025. +[27] OpenMMLab. Opencompass: A universal evaluation platform for foundation models. https://github.com/open-compass/opencompass, 2023. 
+[28] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, 2024. +[29] Zhihong Shao, Peiyi Wang, Qihao Zhu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models, 2024. + +[30] Gemma Team, Morgane Riviere, Shreya Pathak, et al. Gemma 2: Improving open language models at a practical size, 2024. +[31] Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. +[32] Xinpeng Wang, Bolei Ma, Chengzhi Hu, Leon Weber-Genzel, Paul Röttger, Frauke Kreuter, Dirk Hovy, and Barbara Plank. "my answer is C": First-token probabilities do not match text answers in instruction-tuned language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 7407–7416, Bangkok, Thailand, August 2024. Association for Computational Linguistics. +[33] Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023. +[34] Yidong Wang, Zhuohao Yu, Zhengran Zeng, Linyi Yang, Cunxiang Wang, Hao Chen, Chaoya Jiang, Rui Xie, Jindong Wang, Xing Xie, Wei Ye, Shikun Zhang, and Yue Zhang. Pandalm: An automatic evaluation benchmark for llm instruction tuning optimization. 2024. +[35] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian richter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 24824-24837. Curran Associates, Inc., 2022. 
+[36] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian richter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 24824-24837. Curran Associates, Inc., 2022. +[37] An Yang, Baosong Yang, Beichen Zhang, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024. +[38] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. Llamafactory: Unified efficient fine-tuning of $100+$ language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), Bangkok, Thailand, 2024. Association for Computational Linguistics. +[39] Lianghui Zhu, Xinggang Wang, and Xinlong Wang. JudgeLM: Fine-tuned large language models are scalable judges. In The Thirteenth International Conference on Learning Representations, 2025. + +# Appendices + +A Datasets and Models 14 +B VAR Dataset Details 14 + +B.1 Details of Training, Test, and Generalization Sets 15 +B.2 Details of Human Annotation 19 +B.3 Examples from the VAR Dataset 21 + +C Model Training Details 22 + +C.1 Training Hyperparameters 22 +C.2 Original Model Details 22 + +D Prompts 22 + +D.1 Prompts for Generating LLM Responses 22 +D.2 Prompts for GPT-4o Annotation 23 +D.3 Prompts for Data Augmentation 23 +D.4 Prompts for Judge Model 23 +D.5 Prompts for xVerify 25 + +E Supplementary Experimental Results 25 + +E.1 Evaluation Accuracy Results of All xVerify Models 25 +E.2 Computational Efficiency and Operational Cost of xVerify and Judge Models 26 + +# A Datasets and Models + +This section will present the relevant information for all the public datasets and LLMs involved in the experiments of this paper. 
+ +In this study, we employ a total of 24 datasets, which are categorized into four primary types: multiple-choice questions (Choice), short answer questions (Short Answer), mathematical problems (Math), and classification tasks (Classification), as summarized in Table 3. To evaluate the multilingual capabilities of the xVerify model, each question type includes datasets in both Chinese and English, with one dataset featuring multilingual content. For each dataset, samples are partitioned into training and test sets following a 2:1 ratio, with the training and test sets ideally comprising 2,000 and 1,000 instances, respectively. In certain cases, the number of available samples is below 3,000, or the official test set is not publicly available, resulting in reduced dataset sizes after preprocessing. + +Table 3: Datasets Description. The "Type" column indicates the question type in the corresponding dataset, including multiple-choice questions (Choice), short answer questions (Short Answer), math questions (Math), and classification questions (Classification). + +
DatasetType#Train#TestLanguageLicense
CMMLUChoice20001000ChineseCC-BY-NC-4.0
C-EvalChoice1346260ChineseCC-BY-NC-SA-4.0
GPQAChoice794398EnglishCC-BY-4.0
MMLUChoice18161000EnglishMIT
MMLU-ProChoice20001000EnglishMIT
MMLU-ReduxChoice20001000EnglishCC-BY-4.0
AgNewsClassification20001000EnglishUnspecified
AmazonClassification20001000EnglishApache-2.0
CLUEWSCClassification15481000ChineseUnspecified
CMNLIClassification20001000ChineseApache-2.0
AMC23Math2614EnglishUnspecified
AIME 2024Math2010EnglishMIT
CMATHMath1128565ChineseCC-BY-4.0
GSM8KMath20001000EnglishMIT
LiveMathBenchMath19093English & ChineseCC-BY-4.0
MATHMath20001000EnglishMIT
MGSMMath1892946MultilingualCC-BY-SA-4.0
OlympiadBenchMath1787892English & ChineseApache-2.0
ARCShort Answer20001000EnglishCC-BY-SA-4.0
CHIDShort Answer20001000ChineseApache-2.0
C-SimpleQAShort Answer20001000ChineseCC-BY-NC-SA-4.0
DROPShort Answer20001000EnglishCC-BY-SA-4.0
FRAMESShort Answer550274EnglishApache-2.0
SimpleQAShort Answer20001000EnglishMIT
+ +A total of 19 large language models (LLMs) are utilized in our experiments, encompassing a diverse range of model sizes and types, with a particular emphasis on reasoning models (see Table 4). These models are subsequently used to collect LLM-generated responses and to train the xVerify model. + +# B VAR Dataset Details + +This section will present detailed information about the components of the VAR dataset, the details of human annotations, and examples from the dataset. + +Table 4: LLMs Description. LLMs are listed by release date. All models are chat or instruct type. "NaN" indicates that public data is unavailable. + +
Model#Para.TypePublisherDate
ChatGLM3-6B6BChatTsinghua2023.10
GPT-4oNaNChatOpenAI2024.05
Gemma-2-2B-it2BInstructGoogle2024.06
Gemma-2-9B-it9BInstructGoogle2024.06
GLM-4-9B-Chat9BChatTsinghua2024.06
InternLM2.5-7B-Chat7BChatShLab2024.06
Qwen2-1.5B-Instruct1.5BInstructAlibaba2024.06
Qwen2-7B-Instruct7BInstructAlibaba2024.06
Llama-3.1-8B-Instruct8BInstructMeta2024.07
Llama-3.2-1B-Instruct1BInstructMeta2024.09
Llama-3.2-3B-Instruct3BInstructMeta2024.09
Qwen2.5-7B-Instruct7BInstructAlibaba2024.09
Qwen2.5-14B-Instruct14BInstructAlibaba2024.09
Phi-414BChatMicrosoft2024.11
DeepSeek-R1-Distill-Llama-8B8BDistillDeepSeek2025.01
DeepSeek-R1-Distill-Qwen-1.5B1.5BDistillDeepSeek2025.01
DeepSeek-R1-Distill-Qwen-7B7BDistillDeepSeek2025.01
DeepSeek-R1-Distill-Qwen-14B14BDistillDeepSeek2025.01
QwQ-32B32BInstructAlibaba2025.03
+ +# B.1 Details of Training, Test, and Generalization Sets + +# B.1.1 Training Set + +The training set comprises 43,204 samples. Tables 5 to 8 provide the sample counts corresponding to each LLM, dataset, prompt template, and question type. Note that datasets with names containing "_enh" refer to the augmented multiple choice question datasets. + +Table 5: Number of samples from each LLM in the training set. + +
ModelSample Counts
ChatGLM3-6B2588
GPT-4o2691
Gemma-2-2B-it2657
Gemma-2-9B-it2600
GLM-4-9B-Chat2957
InternLM2.5-7B-Chat2935
Qwen2-1.5B-Instruct2700
Qwen2-7B-Instruct2898
LLaMA-3.1-8B-Instruct2852
Qwen2.5-7B-Instruct2854
Qwen2.5-14B-Instruct2801
DeepSeek-R1-Distill-Llama-8B3223
DeepSeek-R1-Distill-Qwen-1.5B3231
DeepSeek-R1-Distill-Qwen-7B3075
DeepSeek-R1-Distill-Qwen-14B3142
+ +# B.1.2 Test Set + +The test set comprises 6,122 samples. Tables 9 to 12 provide the sample counts corresponding to each LLM, dataset, prompt template, and question type. Note that datasets with names containing "_enh" refer to the augmented multiple choice question datasets. + +Table 6: Number of samples from each dataset in the training set. + +
DatasetSample Counts
CMMLU1557
CMMLU_enh1641
GPQA1587
GPQA_enh1668
MMLU1520
MMLU_enh1513
MMLU-Pro1394
MMLU-Pro_enh1442
AgNews1751
CLUEWSC5008
AMC231625
AIME 20241333
CMATH1893
GSM8K1836
MATH2485
MGSM1384
OlympiadBench_en2573
OlympiadBench_zh2709
CHID2424
C-SimpleQA1913
DROP1928
FRAMES2020
+ +Table 7: Number of samples from each prompt template in the training set. + +
Prompt TemplateSample Counts
0-shot4884
0-shot-restrict5977
0-shot-cot4907
0-shot-cot-restrict6041
5-shot4774
5-shot-restrict5866
5-shot-cot4916
5-shot-cot-restrict5839
+ +Table 8: Number of samples from each question type in the training set. + +
DatasetSample Counts
Multiple Choice12322
Math15838
Short Answer8285
Classification6759
+ +Table 9: Number of samples from each LLM in the test set. + +
ModelSample Counts
ChatGLM3-6B378
GPT-4o400
Gemma-2-2B-it416
Gemma-2-9B-it369
GLM-4-9B-Chat367
InternLM2.5-7B-Chat367
Qwen2-1.5B-Instruct433
Qwen2-7B-Instruct427
LLaMA-3.1-8B-Instruct404
Qwen2.5-7B-Instruct374
Qwen2.5-14B-Instruct415
DeepSeek-R1-Distill-Llama-8B430
DeepSeek-R1-Distill-Qwen-1.5B451
DeepSeek-R1-Distill-Qwen-7B439
DeepSeek-R1-Distill-Qwen-14B452
+ +Table 10: Number of samples from each dataset in the test set. + +
DatasetSample Counts
CMMLU216
CMMLU_enh195
GPQA207
GPQA_enh235
MMLU225
MMLU_enh222
MMLU-Pro171
MMLU-Pro_enh192
AgNews261
CLUEWSC710
AMC23258
AIME 2024186
CMATH263
GSM8K262
MATH362
MGSM205
OlympiadBench_en349
OlympiadBench_zh446
CHID347
C-SimpleQA270
DROP265
FRAMES275
+ +Table 11: Number of samples from each prompt template in the test set. + +
DatasetSample Counts
Multiple Choice1663
Math2331
Short Answer1157
Classification971
+ +Table 12: Number of samples from each question type in the test set. + +
Prompt TemplateSample Counts
0-shot680
0-shot-restrict798
0-shot-cot642
0-shot-cot-restrict891
5-shot690
5-shot-restrict789
5-shot-cot702
5-shot-cot-restrict930
+ +# B.1.3 Generalization Set + +The generalization set comprises 6,468 samples. Tables 13 to 16 provide the sample counts corresponding to each LLM, dataset, prompt template, and question type. Note that datasets with names containing "_enh" refer to the augmented multiple choice question datasets. + +Table 13: Number of samples from each LLM in the generalization set. + +
ModelSample Counts
ChatGLM3-6B300
GPT-4o305
Gemma-2-2B-it427
Gemma-2-9B-it296
GLM-4-9B-Chat339
InternLM2.5-7B-Chat341
Qwen2-1.5B-Instruct280
Qwen2-7B-Instruct346
LLaMA-3.1-8B-Instruct400
LLaMA-3.2-1B-Instruct314
LLaMA-3.2-3B-Instruct310
Qwen2.5-7B-Instruct326
Qwen2.5-14B-Instruct334
Phi-4314
DeepSeek-R1-Distill-Llama-8B341
DeepSeek-R1-Distill-Qwen-1.5B399
DeepSeek-R1-Distill-Qwen-7B375
DeepSeek-R1-Distill-Qwen-14B434
QwQ-32B287
+ +Table 14: Number of samples from each dataset in the generalization set. + +
DatasetSample Counts
C-Eval435
C-Eval_enh442
MMLU-Redux436
MMLU-Redux_enh483
Amazon646
CMNLI643
LiveMathBench_en1127
LiveMathBench_zh821
ARC807
SimpleQA628
+ +Table 15: Number of samples from each prompt template in the generalization set. + +
DatasetSample Counts
Multiple Choice1796
Math1948
Short Answer1435
Classification1289
+ +Table 16: Number of samples from each question type in the generalization set. + +
Prompt TemplateSample Counts
0-shot703
0-shot-restrict856
0-shot-cot772
0-shot-cot-restrict915
5-shot690
5-shot-restrict885
5-shot-cot756
5-shot-cot-restrict891
+ +# B.2 Details of Human Annotation + +To ensure high-quality annotation for the VAR dataset, we assembled a team of 8 annotators. Among them, 6 hold bachelor's degrees and are primarily responsible for batch annotation tasks, while the other 2 hold master's degrees and focus on reviewing complex cases or resolving discrepancies in annotations made by multiple annotators. The gender ratio within the annotation team is balanced at 1:1. In terms of compensation, all annotators were paid according to the local industry average rates. The annotation process lasted for three weeks, covering a total of 15 working days. + +![](images/b0c4cb4845ea9f03c0724fd183e74ebd191cb72e12c445bf83a4897206519880.jpg) +Figure 3: Illustration of the Label Studio Interface. + +The detailed annotation guidelines are presented below. Figure 3 shows an example of the interface used in our annotation tool. Each sample to be annotated contains four fields: question, LLM output, correct answer, and answer range. The question type includes four categories: multiple choice, math, short answer, and classification. Annotators are required to judge whether the LLM output matches the correct answer based on the question, while the answer range serves as auxiliary reference information to support the decision-making process. The specific annotation instructions and criteria are as follows: + +# Answer evaluation criteria for different question types: + +# - Multiple Choice + +For multiple-choice questions, answer options may be labeled with letters (A, B, C, D, ...) Roman numerals (I, II, III, IV, ...), or Arabic numerals (1, 2, 3, 4, ...). The LLM output is considered correct if it provides: + +- Only the correct option label; +- Only the correct option content; +- Both the correct label and content. + +In cases where the label and content are inconsistent, the content takes precedence. 
If the content is correct, the answer is marked as correct; if the content is incorrect, the answer is marked as incorrect, even if the option label is correct (see the final annotation example for reference). + +# - Short Answer + +Short-answer questions may require responses such as names, locations, numbers, dates, or full sentences. The evaluation criteria are: + +- For concise answers (e.g., names, places, dates), strict string matching is required. +- For sentence-level answers, semantic consistency with the reference answer is required. +- For numerical answers, mathematical equivalence must be verified (e.g., "12000" and "12,000" are considered equivalent). + +# - Classification + +Classification questions come with a fixed set of candidate answers. The LLM output must explicitly and exactly match the correct answer in this set to be judged as correct. + +# Math + +For mathematical questions, the final answer in the LLM output must be mathematically equivalent to the reference answer. Evaluation criteria include: + +- If an initial answer (ans1) is given but followed by a derived final answer (ans2) through calculation, ans2 should be used for evaluation. +- If the LLM output or ground-truth answer is provided in LaTeX format and cannot be visually interpreted, a LaTeX compiler should be used to determine equivalence. + +# Special cases: + +# Overly Long Responses + +If the LLM output is excessively long, use the final answer provided as the basis for judgment. If the response does not converge to a clear answer (e.g., repeated changes or ambiguity), it should be marked as incorrect. + +# - Truncated Calculations + +In long responses where the final verification or calculation is truncated, it can be ignored. If a clear answer was provided earlier, use it for evaluation; if not, mark the response as incorrect (see the second-to-last annotation example). 
+ +# Unfamiliar Domains + +If the correctness of the LLM response cannot be determined due to unfamiliar domain knowledge (e.g., mathematical expressions whose equivalence is hard to judge), the case may be skipped and will later be annotated by more qualified annotators. + +# - Irrelevant but Matched Answers + +If the LLM output is irrelevant to the question but coincidentally contains the correct final answer, it should be marked as incorrect. + +# B.3 Examples from the VAR Dataset + +```jsonl +{ "dataset": "SimpleQA", "question": "In which year did Fayaz A. Malik (an Indian pharmacologist, cancer biologist, and scientist) receive the Young Scientist of the Year from the Council of Scientific and Industrial Research?", "question_type": "short answer", "correct_answer": "2009", "llm_output": "The year Fayaz A. Malik received the Young Scientist of the Year award from the Council of Scientific and Industrial Research was 2001\. \textbackslash textbackslash n\textbackslash textbackslash nThe answer is 2001.", "humanjudge_result": "Incorrect" } { "dataset": "MMLU-Redux_enh", "question": "If you wanted to find the global distribution of coal , you would use a Answer Choices: (I) reference map. (II) topographic map. (III) thematic map. (IV) location map.", "question_type": "multiple choice", "correct_answer": "III", "llm_output": "The answer is (III) thematic map.", "humanjudge_result": "Correct" } { "dataset": "Amazon", "question": "Lightsaber Action!!: This game is absolutely the best game I have ever seen! there is a little bit of violence, but most of the time you kill droids, not humans. My favorite weapon is the lightsaber, because you can deflect laser blasts\. 
\textbackslash textbackslash n Please identify the sentiment polarity of the sentence: positive or negative", "question_type": "classification", "correct_answer": "positive", "llm_output": "The answer is positive.", "humanjudge_result": "Correct" } { "dataset": "GSM8K", "question": "Daisy and Rose were enjoying their backyard pool with their dogs. If there are 24 legs/paws in the pool, how many dogs do Daisy and Rose have?", "question_type": "math", "correct_answer": "5", "llm_output": "To find the number of dogs Daisy and Rose have, we need to first determine the total number of people and then subtract that from the total number of legs/paws\.n\sSince each person has 2 legs, we can divide the total number of legs/paws by 2 to find the number of people\.n24 legs/paws / 2 legs/person = 12 people\n\sSince Daisy and Rose are the only people in the pool, we can subtract them from the total number of people to find the number of dogs\.n12 people - 2 people = 10 dogs\n\nTherefore, Daisy and Rose have 10 dogs\.". "humanjudge_result": "Incorrect" } +``` + +# C Model Training Details + +This section will further present additional information about the training of the xVerify model. + +# C.1 Training Hyperparameters + +The xVerify model is trained using the QLoRA method, with consistent hyperparameter settings across all base models. The training is carried out on multiple GPU servers. Table 17 presents the key training hyperparameters. + +
HyperparameterSetting
Per Device Train Batch Size1
Gradient Accumulation Steps8
Learning Rate1.0e-4
Num Train Epochs1.0
LrScheduler Typecosine
Warmup Ratio0.1
Bf16true
Ddp Timeout180000000
Lora Rank8
+ +Table 17: Hyperparameter settings for model training. + +# C.2 Original Model Details + +This paper uses 14 original models of different parameter scales and types for training on the VAR dataset. Table 18 presents the relevant information for all xVerify models and their corresponding original models. + +Table 18: Details of Original Models and Corresponding xVerify Models. Sorted by Original Model Name. + +
Original Model#Para.TypeContext LengthxVerify Model
Gemma-2-2B-it2BInstruct8KxVerify-2B-I
Gemma-2-9B-it9BInstruct8KxVerify-9B-I
Gemma-2-27B-it27BInstruct8KxVerify-27B-I
GLM-4-9B-Chat9BChat128KxVerify-9B-C
Llama-3.2-1B-Instruct1BInstruct128KxVerify-1B-I
Llama-3.2-3B-Instruct3BInstruct128KxVerify-3B-Ia
Llama-3.1-8B-Instruct8BInstruct128KxVerify-8B-I
Phi-414BInstruct16KxVerify-14B-Ib
Qwen2.5-0.5B-Instruct0.5BInstruct128KxVerify-0.5B-I
Qwen2.5-1.5B-Instruct1.5BInstruct128KxVerify-1.5B-I
Qwen2.5-3B-Instruct3BInstruct128KxVerify-3B-Ib
Qwen2.5-7B-Instruct7BInstruct128KxVerify-7B-I
Qwen2.5-14B-Instruct14BInstruct128KxVerify-14B-Ia
Qwen2.5-32B-Instruct32BInstruct128KxVerify-32B-I
+ +# D Prompts + +This section will present all the prompt templates used in the experiments of this paper. + +# D.1 Prompts for Generating LLM Responses + +The prompt templates used to generate LLM responses are illustrated in Figures 4 to 7. Each template consists of four fields that need to be populated: "task_type", "task_description", "examples", and + +"question". The "task_type" and "task_description" fields are determined based on the type of question. For instance, for questions from the GPQA dataset, "task_type" is set to "multidisciplinary question", and "task_description" is set to "Please choose the answer from options A to D, corresponding to the question." During dataset preprocessing, we design appropriate "task_type" and "task_description" values for each dataset. The "examples" field is filled according to the selected prompting strategy, either 0-shot or 5-shot. In the 0-shot setting, this field is left empty, while in the 5-shot setting, it is populated with five example question-answer pairs that are similar to the target "question". The "question" field contains the specific query to be answered by the LLM. Examples of the "examples" and "question" fields are shown in Figures 8 and 9, respectively. + +```txt +You are an expert in {task_type}, {task_description} +{examples} +{question} +``` + +Figure 4: Few-shot prompt for generating LLM responses. + +```txt +You are an expert in {task_type}, {task_description} +{examples} +{question} +End your final answer with 'The answer is . +``` + +Figure 5: Few-shot-restrict prompt for generating LLM responses. + +```txt +You are an expert in {task_type}, {task_description} +{examples} +{question} +Let's think step by step. +``` + +Figure 6: Few-shot-cot prompt for generating LLM responses. + +# D.2 Prompts for GPT-4o Annotation + +The prompt templates used for annotating the collected LLM question-answer pairs with GPT-4o during the construction of the VAR dataset are shown in Figures 10 and 11. 
Both of these prompt templates employ the Chain-of-Thought (CoT) strategy to ensure the accuracy of the annotations generated by GPT-4o. + +# D.3 Prompts for Data Augmentation + +In constructing the VAR dataset, two prompt templates used to guide GPT-4o in augmenting mathematical question samples are presented in Figures 12 and 13. + +# D.4 Prompts for Judge Model + +In the experiments of this paper, the prompts used for all judge models were constructed based on the official templates provided by their respective developers. However, for some judge models, the official prompt templates were not fully compatible with the evaluation tasks in this paper, so other similar prompt templates were used. Specifically, Figure 14 shows the prompt template used by GPT-4o as Judge, Figure 15 shows the prompt template used by GPT-4o as Judge (CoT), Figure 16 shows the prompt template used by JudgeLM series models and PandaLM-7B-v1, Figure 17 shows the prompt template used by Auto-J series models, and Figure 18 shows the prompt template used + +You are an expert in {task_type}, {task_description} +{examples} +{question} + +Let's think step by step. + +End your final answer with 'The answer is . + +Figure 7: Few-shot-cot-restrict prompt for generating LLM responses. + +# ***** Start In-Context Examples **** + +Q: A late game rally by Washington led them to the Eagles' 26 yard line. A shot to the end zone by Robert Griffin III would be intercepted by Brandon Boykin, clinching an Eagles win. The Eagles would move to 6-5. This is the Eagles first win at Lincoln Financial Field since Week 4 of the 2012 season, because prior to this game, the Eagles had never won a game in their home stadium in 414 days since that same week, snapping a 10-game losing streak at home with this win. How many more wins than losses did the Eagles have after this game? + +A: The answer is 1. + +Q: The population of Sevastopol proper is 418,987 (01.01.16), making it the largest in the Crimean Peninsula. 
The city's agglomeration has about 600,000 people (2015). According to the Ukrainian Census (2001), the ethnic groups of Sevastopol include Russians (71.6%), Ukrainians (22.4%), Belarusians (1.6%), Tatars (0.7%), Crimean Tatars (0.5%), Armenians (0.3%), Jews (0.3%), Moldovans (0.2%), and Azerbaijani people (0.2%). Which ethnic has a higher percentage of the population in Sevastopol: Russians or Armenians? + +A: The answer is Russians. + +Q: the most common crimes in the ACT are property related crimes, unlawful entry with intent and motor vehicle theft. They affected 2,304 and 966 people (580 and 243 per 100,000 persons respectively). Homicide and related offences—murder, attempted murder and manslaughter, but excluding driving causing death and conspiracy to murder—affect 1.0 per 100,000 persons, which is below the national average of 1.9 per 100,000. Rates of sexual assault (64.4 per 100,000 persons) are also below the national average (98.5 per 100,000). Which was there a higher national average for, homicide and related offences or sexual assault? + +A: The answer is sexual assault. + +Q: In the county, the population was spread out with $21.7\%$ under the age of 18, $8.5\%$ from 18 to 24, $26.9\%$ from 25 to 44, $27.7\%$ from 45 to 64, and $15.0\%$ who were 65 years of age or older. The median age was 40 years. For every 100 females, there were 94.4 males. For every 100 females age 18 and over, there were 98.7 males. How many percent were not from 45 to 64? + +A: The answer is 72.3. + +Q: The median age in the city was 35.1 years. $24.2\%$ of residents were under the age of 18; $7.9\%$ were between the ages of 18 and 24; $33.8\%$ were from 25 to 44; $24.6\%$ were from 45 to 64; and $9.5\%$ were 65 years of age or older. The gender makeup of the city was $48.6\%$ male and $51.4\%$ females. How many more people, in terms of percentage, were in the largest age group compared to the second smallest? + +A: The answer is 24.3. 
+ +***** End In-Context Examples **** + +Figure 8: Example of "examples" fields. + +Q: Let $ABCD$ be a tetrahedron such that $AB = CD = \sqrt{41}$ , $AC = BD = \sqrt{80}$ , and $BC = AD = \sqrt{89}$ . There exists a point $I$ inside the tetrahedron such that the distances from $I$ to each of the faces of the tetrahedron are all equal. This distance can be written in the form $\frac{m\sqrt{n}}{p}$ , where $m, n$ , and $p$ are positive integers, $m$ and $p$ are relatively prime, and $n$ is not divisible by the square of any prime. Find $m + n + p$ . + +A: + +Figure 9: Example of "question" fields. + +You are a diligent and precise assistant tasked with evaluating the correctness of responses. Think step by step as you make your evaluation. + +You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. Think step by step and respond with either [Correct] or [Incorrect]. + +# Special considerations: + +1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect]. +2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct]. +3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content. +4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]. 
+ +```txt +Please present your response in the following JSON format: { "reasoning": "Your step-by-step reasoning here.", "judgment": "Correct or Incorrect" } Question: ""{question}"" Output sentence: ""{output}"" Correct answer: {answer} +``` + +Figure 10: Prompt I for GPT-4o annotation. + +by Prometheus series models. The official prompt template for the CompassJudger-1 series models corresponds to pairwise evaluation, so the prompt template used by this series is the same as that for the xVerify model, as shown in Figure 19. + +# D.5 Prompts for xVerify + +Figure 19 shows the prompt template used to construct the input for the xVerify model. This template is used both for training and evaluation of the xVerify model. Specifically, "question," "output," and "answer" correspond to the question content, the LLM response, and the reference answer, respectively. + +# E Supplementary Experimental Results + +# E.1 Evaluation Accuracy Results of All xVerify Models + +Tables 19 and 20 present the performance of all $14\mathrm{x}$ Verify models on the test set and the generalization set, respectively. Overall, each xVerify model achieves an F1 score and accuracy exceeding $96.5\%$ + +You are a diligent and precise assistant tasked with evaluating the correctness of responses. Think step by step as you make your evaluation. + +We request your feedback on whether the model's response correctly answers the user question above. Follow these steps to make your evaluation: + +1. Think step by step: Read the user question carefully. +2. Think step by step: Review the reference answer and understand the key points it covers. +3. Think step by step: Compare the model's answer with the reference answer. +4. Think step by step: Determine if the model's answer addresses the key points in the reference answer and correctly answers the question. + +First, provide your reasoning in detail. Then, clearly state your judgment as either "Correct" or "Incorrect." 
+ +```txt +Please present your response in the following JSON format: +{ "reasoning": "Your step-by-step reasoning here.", "judgment": "Correct or Incorrect" +} +Question: {question} +Reference Answer: {answer} +Model's Answer: {output} +``` + +Figure 11: Prompt II for GPT-4o annotation. + +on the test set and over $95.52\%$ on the generalization set. These results demonstrate not only the effectiveness of the xVerify models for evaluation tasks but also the high quality of the VAR dataset. + +A comparison between the results on the two datasets shows that the performance on the generalization set experiences a slight decline relative to the test set, with the decrease not exceeding $1.6\%$ . Moreover, models with larger parameter sizes exhibit smaller performance drops. This indicates that the xVerify models possess strong generalization capabilities, which further improve with an increase in parameter scale. Additionally, it is observed across both datasets that while the performance of xVerify models generally enhances with the increment of parameter size, beyond a certain threshold, further increases in parameter scale do not lead to additional performance gains. + +Table 19: Evaluation Accuracy Results on the Test Set: All xVerify Models. The best performance in each column is shown in **bold**, and the second-best performance is underlined. + +
xVerify ModelMultiple ChoiceMathShort AnswerClassificationTotal
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
xVerify-0.5B-I97.78%97.90%93.74%94.64%96.72%97.49%99.71%99.59%96.69%96.85%
xVerify-1B-I97.22%97.35%94.76%95.45%96.06%96.97%99.71%99.59%96.77%96.91%
xVerify-1.5B-I97.85%97.96%95.10%95.75%96.05%96.97%99.63%99.49%97.05%97.17%
xVerify-2B-I97.93%98.02%95.06%95.71%96.06%96.97%99.78%99.69%97.09%97.21%
xVerify-3B-Ia97.73%97.84%95.00%95.67%96.17%97.06%99.71%99.59%97.02%97.14%
xVerify-3B-Ib97.31%97.41%95.65%96.18%96.38%97.23%99.78%99.69%97.17%97.27%
xVerify-7B-I97.75%97.84%95.94%96.44%96.51%97.32%99.78%99.69%97.41%97.50%
xVerify-8B-I97.92%98.02%95.34%95.97%96.05%96.97%99.71%99.59%97.17%97.29%
xVerify-9B-C98.29%98.38%95.26%95.88%96.06%96.97%99.78%99.69%97.25%97.37%
xVerify-9B-I97.43%97.53%95.75%96.27%96.06%96.97%99.78%99.69%97.19%97.29%
xVerify-14B-Ia97.49%97.59%95.73%96.22%95.41%96.46%99.63%99.49%97.06%97.16%
xVerify-14B-Ib97.67%97.78%96.10%96.57%95.74%96.72%99.71%99.59%97.31%97.40%
xVerify-27B-I97.81%97.90%95.46%96.01%96.19%97.06%99.56%99.38%97.15%97.26%
xVerify-32B-I97.81%97.90%95.88%96.31%96.18%97.06%99.71%99.59%97.32%97.40%
+ +# E.2 Computational Efficiency and Operational Cost of xVerify and Judge Models + +Table 21 displays the running time performance of the xVerify model and other judge models. Each model was evaluated using 200 randomly selected samples per question type from the generalization set, with running times measured in seconds. This data provides insights into the computational + +You are an expert in mathematical calculations and data expressions. You are required to provide different equivalent forms of the standard answer for the following math problem. + +Problem: {question} + +Answer: {answer} + +Example 1: + +Problem: Let $ \alpha \beta \gamma be the radian measure of the smallest angle in a $3-4-5$ right triangle. Let $ \alpha \beta \gamma be the radian measure of the smallest angle in a $7-24-25$ right triangle. Express $ \alpha \beta \gamma in terms of $ \alpha \beta \gamma$. + +Answer: \\frac{\backslashpi}{2} - 2\alpha + +Output: + +```javascript +"\"json { + "answer1": "\"\pi/2 - 2\alpha", + "answer2": "pi/2 - 2\alpha", + "answer3": "pi/2 - 2 * \alpha", + "answer4": "0.5 * \pi - 2 * \alpha" +}"); +``` + +Example 2: + +Problem: A volcano erupts and spews ash into the sky. The ash cloud spreads out in a diameter eighteen times as far as the distance it shot up into the sky. If the ashes erupted three hundred feet into the sky, what was the radius of the ash cloud in feet? + +Answer: 2700 + +```txt +Output: +``` +"\"json { + "answer1": "2.7×10^3", + "answer2": "2700.0", + "answer3": "2.7 \times times 10^3", + "answer4": "$2.7 \times times 10^3$", + "answer5": "Two thousand seven hundred"}'' +``` + +Please note: + +1. You need to provide 3 to 5 different standard forms of the answer + +2. Each different form must be equivalent to the standard answer, i.e., it should still be a correct and valid answer. + +3. You may use LaTeX, scientific notation, or other standard mathematical expressions. + +```txt +4. 
Please follow the JSON format below for the output: +``` +"\"json { + "answer1": "xxx", "answer2": "xxx", "answer3": "xxx", ... +}..." +``` + +Figure 12: Prompt for Generating Alternative Reference Answers. + +```txt +You are an expert in mathematical calculations and data expressions. For an answer to a specific mathematical problem, you are required to provide equivalent and different expressions of the mathematical result. Answer: {output} +Example 1: Answer: The answer is $\beta = \backslash$ frac{pi{2}-2\alpha}. Output: "'json { "answer1": "The answer is $\backslash$ pi/2 - 2\alpha}. , "answer2": "The answer is pi/2 - 2\alpha}. , "answer3": "The answer is pi/2 - 2\* alpha.", "answer4": "The answer is $0.5*$ pi-2\* alpha." }"" +Example 2: Answer: The answer is 2700 feet. Output: "'json { "answer1": "The answer is $2.7\times 10^{-3}$ feet.", "answer2": "The answer is 2700.0 feet.", "answer3": "The answer is 2.7 times $10^{-3}$ feet.", "answer4": "The answer is $\$ 2.7$ times $10^{-3}\{3\}$ feet.", "answer5": "The answer is Two thousand seven hundred feet." }"" +Please note: 1. You need to provide 3 to 5 different expressions, each replacing the mathematical result with an equivalent and different form. 2. Each expression must be exactly equivalent to the target answer to ensure its correctness. 3. You can use LaTeX, scientific notation, or other standard mathematical formats. 4. Please output the result in the following JSON format: "'json { "answer1": "The answer is xxx", "answer2": "The answer is xxx", "answer3": "The answer is xxx", "answer4": "The answer is xxx", "answer5": "The answer is xxx" }"" +``` + +Figure 13: Prompt for Generating Diverse Final Answer Expressions. + +You are a diligent and precise assistant tasked with evaluating the correctness of responses. You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. 
Respond with either [Correct] or [Incorrect]. + +# Special considerations: + +1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect]. +2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct]. +3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content. +4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]. + +Please present your response in the following JSON format: + +```json +{" judgement": "Correct or Incorrect"} +``` + +Question: ""{question}"" + +Output sentence: ""{response}"" + +Correct answer: {reference} + +Figure 14: Prompt for GPT-4o as Judge. + +You are a diligent and precise assistant tasked with evaluating the correctness of responses. Think step by step as you make your evaluation. You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. Think step by step and respond with either [Correct] or [Incorrect]. + +# Special considerations: + +1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect]. +2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct]. +3. 
**Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content. +4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]. + +Please present your response in the following JSON format: + +```txt +"reasoning": "Your step-by-step reasoning here.", "judgement": "Correct or Incorrect" +``` + +Question: ""{question}"" + +Output sentence: ""{response}" + +Correct answer: {reference} + +Figure 15: Prompt for GPT-4o as Judge (CoT). + +```txt +You are a helpful and precise assistant for checking the quality of the answer. +[Question] +{question} +[Reference Answer] +{reference} +[Model's Answer] +{response} +[System] +We would like to request your feedback on the performance of the model's response to the user question displayed above. +Based on the reference answer, please rate the accuracy of the response. The model receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance. +Please first output a single line containing only the score. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias. +```java +```java +You are a helpful and precise assistant for checking the quality of the answer. +[Question] +{question} +[Reference Answer] +{reference} +[Model's Answer] +{response} +[System] +``` + +Figure 16: Prompt for JudgeLM. + +```txt +[INST] Write critiques for a submitted response on a given user's query, incorporating the correct answer as a reference, and grade the response accordingly: +``` + +```javascript +[BEGIN DATA] +\*\*\* +[Query]: {question} +\*\*\* +[Correct Answer]: {reference} +\*\*\* +[Response]: {response} +\*\*\* +[END DATA] +Write critiques for this response. 
After that, you should give a final rating for the response on a scale of 1 to 10 by strictly following this format: "[[rating]]", for example: "Rating: [[5]]". [/INST]
```

Figure 17: Prompt for Auto-J.

Table 20: Evaluation Accuracy Results on the Generalization Set: All xVerify Models. The best performance in each column is shown in **bold**, and the second-best performance is **underlined**.

xVerify ModelMultiple ChoiceMathShort AnswerClassificationTotal
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
xVerify-0.5B-I96.49%96.10%80.00%91.94%96.95%97.00%99.03%98.53%95.29%95.53%
xVerify-1B-I96.10%95.66%82.45%92.51%97.32%97.35%98.92%98.37%95.43%95.62%
xVerify-1.5B-I96.76%96.38%83.58%93.12%97.46%97.49%98.88%98.29%95.85%96.03%
xVerify-2B-I96.27%95.82%82.11%92.51%97.60%97.63%98.98%98.45%95.57%95.75%
xVerify-3B-Ia96.44%95.99%86.10%94.25%97.31%97.35%99.03%98.53%96.11%96.27%
xVerify-3B-Ib96.21%95.71%86.20%94.15%97.60%97.63%99.03%98.53%96.08%96.23%
xVerify-7B-I96.16%95.66%87.86%94.87%97.45%97.49%98.93%98.37%96.22%96.37%
xVerify-8B-I96.67%96.27%86.76%94.61%97.45%97.49%99.03%98.53%96.33%96.49%
xVerify-9B-C97.00%96.66%87.08%94.71%97.45%97.49%98.98%98.45%96.45%96.61%
xVerify-9B-I96.06%95.55%87.47%94.76%97.53%97.56%99.13%98.68%96.23%96.38%
xVerify-14B-Ia96.11%95.60%90.20%95.74%97.32%97.35%99.13%98.68%96.53%96.65%
xVerify-14B-Ib96.35%95.88%87.88%94.92%97.45%97.49%98.93%98.37%96.30%96.44%
xVerify-27B-I96.01%95.49%85.64%93.99%97.32%97.35%99.13%98.68%95.93%96.09%
xVerify-32B-I96.22%95.71%90.09%95.59%97.32%97.35%99.03%98.53%96.50%96.60%
+ +efficiency of each model under uniform testing conditions, thereby facilitating a comparative analysis of their real-time processing capabilities and scalability in practical applications. + +You are a fair judge assistant tasked with providing clear, objective feedback based on specific criteria, ensuring each assessment reflects the absolute standards set for performance." + +Task Description: + +An instruction (might include an Input inside it), a response to evaluate, a reference answer that gets a score of 5, and a score rubric representing a evaluation criteria are given. + +1. Write a detailed feedback that assess the quality of the response strictly based on the given score rubric, not evaluating in general. +2. After writing a feedback, write a score that is an integer between 1 and 5. You should refer to the score rubric. +3. The output format should look as follows: "Feedback: (write a feedback for criteria) [RESULT] (an integer number between 1 and 5)" 4. Please do not generate any other opening, closing, and explanations. + +The instruction to evaluate: + +{question} + +##Response to evaluate: + +{response} + +Reference Answer (Score 5): + +{reference} + +Score Rubrics: + +[Does the model demonstrate logical and effective reasoning in its responses?] + +Score 1: The model's responses show a complete lack of logical reasoning, often resulting in irrelevant or nonsensical answers. + +Score 2: The model occasionally shows signs of logical reasoning but generally struggles to provide coherent or relevant responses. + +Score 3: The model usually demonstrates basic reasoning capabilities, though it may not consistently apply logical principles or fully resolve complex issues. + +Score 4: The model frequently exhibits strong reasoning skills, effectively addressing complex questions with minor inconsistencies or errors. 
+ +Score 5: The model consistently demonstrates advanced reasoning abilities, providing logically sound, coherent, and sophisticated responses to complex queries. + +Feedback: + +Figure 18: Prompt for Prometheus. + +You are a diligent and precise assistant tasked with evaluating the correctness of responses. You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. Respond with either [Correct] or [Incorrect]. + +Special considerations: + +1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect]. +2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct]. +3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content. +4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]. + +Question: ""{question}"" + +Output sentence: ""{output}" + +Correct answer: {answer} + +Judgement: + +Figure 19: Prompt for xVerify. + +All models were executed on GPUs with identical configurations. Specifically, Prometheus-8x7B-v2.0, JudgeLM-33B-v1.0, CompassJudger-1-32B, xVerify-27B-I, and xVerify-32B-I were deployed on two GPUs for inference, while the remaining models were deployed on a single GPU. From Table 21, it is evident that all xVerify models exhibit an overall average runtime within 100 seconds, whereas the overall average runtime for the other judge models exceeds 100 seconds. 
Moreover, for each question category, the models with the shortest evaluation times are the xVerify models. Thus, the xVerify models demonstrably surpass the other judge models in terms of evaluation efficiency. + +Table 22 presents the evaluation costs incurred when employing GPT-4o as the judge, based on assessments of 200 randomly selected samples per question type, along with the overall expenditure. Apart from the prerequisite deployment overhead, the cost of invoking the xVerify models for evaluation is substantially lower than that of GPT-4o. Additionally, compared to GPT-4o, which relies on remote server deployment, the locally deployed xVerify models offer higher invocation efficiency. Taken together, these results underscore that the xVerify models outperform the other judge models in both usage cost and evaluation efficiency. + +Table 21: Running Time Comparison of xVerify Models and Other Judge Models (200 Samples per Question Type). The best performance in each column is shown in **bold**, and the second-best performance is underlined. + +
Method TypeMethodMultiple Choice (s)Math (s)Short Answer (s)Classification (s)Avg (s)
Judge ModelPandaLM-7B-v1304.5076.2476.9765.79130.88
Auto-J-Bilingual-6B1,570.441,802.711,194.081,148.321,428.89
Auto-J-13B3,055.003,622.702,807.231,903.002,846.98
Prometheus-7B-v2.01,173.80947.71706.74696.34881.15
Prometheus-8x7B-v2.01,557.101,128.081,132.84750.511,142.13
JudgeLM-7B-v1.0551.88469.10394.57348.05440.90
JudgeLM-13B-v1.0777.73598.19564.25529.60617.44
JudgeLM-33B-v1.01,041.831,018.37789.80762.99903.25
CompassJudger-1-1.5B189.45244.08139.50110.95171.00
CompassJudger-1-7B163.96568.72450.2080.58315.87
CompassJudger-1-14B346.80571.66217.86196.18333.13
CompassJudger-1-32B147.53258.10133.59152.11172.83
xVerifyxVerify-0.5B-I38.9741.2539.1238.8739.55
xVerify-1B-I33.9136.6333.4433.4734.36
xVerify-1.5B-I43.0546.8742.1742.0843.54
xVerify-2B-I38.4473.1639.2937.3847.07
xVerify-3B-Ia38.5444.5437.1143.0240.80
xVerify-3B-Ib46.9353.58106.0647.8463.60
xVerify-7B-I68.2495.5050.6651.6766.52
xVerify-8B-I78.0661.5745.3446.8257.95
xVerify-9B-C131.0770.1651.6652.5776.37
xVerify-9B-I54.2069.9149.4151.0656.15
xVerify-14B-Ia59.18114.9155.5054.8071.10
xVerify-14B-Ib61.17145.19116.4357.5595.09
xVerify-27B-I85.2889.4158.9961.0073.67
xVerify-32B-I131.0598.9964.7467.4590.56
+ +Table 22: Total costs (in USD) of GPT-4o as Judge (200 Samples per Question Type). + +
MethodMultiple Choice ($)Math ($)Short Answer ($)Classification ($)Total ($)
GPT-4o as Judge0.310.660.240.271.48
GPT-4o as Judge (CoT)0.551.000.420.482.45
\ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10481/images/022d0e835114f130f558611eb0eb1ecb4e306987c7a3bc23db29b6b363a9a7bb.jpg b/data/2025/2504_10xxx/2504.10481/images/022d0e835114f130f558611eb0eb1ecb4e306987c7a3bc23db29b6b363a9a7bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e558e880866a061e5a48af60e2c788d860399407 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/022d0e835114f130f558611eb0eb1ecb4e306987c7a3bc23db29b6b363a9a7bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0325fa67de7b648b4d6eda3de97163fd5bd108c194e7e1023b6d735eeedf565 +size 22559 diff --git a/data/2025/2504_10xxx/2504.10481/images/0629df9d0b19bee4203560dc4a695c83d0ddc6503641996064d63f483195b57e.jpg b/data/2025/2504_10xxx/2504.10481/images/0629df9d0b19bee4203560dc4a695c83d0ddc6503641996064d63f483195b57e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5deaf09a85f099f8a078e8f21fd2f4a85dbc0d30 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/0629df9d0b19bee4203560dc4a695c83d0ddc6503641996064d63f483195b57e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2eedb71c0ceb500db422032ccdd90c8aeefecc2a2d9e38cdf40e449cdd6c4fd4 +size 50495 diff --git a/data/2025/2504_10xxx/2504.10481/images/0a73b1232815b5f11a0943458d3e6c251b60e51f233064cae9b36a3e559a35f7.jpg b/data/2025/2504_10xxx/2504.10481/images/0a73b1232815b5f11a0943458d3e6c251b60e51f233064cae9b36a3e559a35f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d5a7dcd7b3b2125c95df0442e1d7a059a805ea0b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/0a73b1232815b5f11a0943458d3e6c251b60e51f233064cae9b36a3e559a35f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dbf6c9fd18dee423964f63229bf0fb394f45e148e94e9ec8147dba29f62944b +size 15113 diff --git 
a/data/2025/2504_10xxx/2504.10481/images/159d98ee817e85da35d1027d4375795bab56e66374089e82404d211381bf408d.jpg b/data/2025/2504_10xxx/2504.10481/images/159d98ee817e85da35d1027d4375795bab56e66374089e82404d211381bf408d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..79b6dfb64597d0407c5ed0448787eb9f3c3471c4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/159d98ee817e85da35d1027d4375795bab56e66374089e82404d211381bf408d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce7e2cbdfd22dda817febf6fe5e8abce1960be1041adc05741c3949b692adb40 +size 16050 diff --git a/data/2025/2504_10xxx/2504.10481/images/3dac7cbc26dfe8f1ceb6cbaed76906c9a6301581c993f71b06d82a52f269f72d.jpg b/data/2025/2504_10xxx/2504.10481/images/3dac7cbc26dfe8f1ceb6cbaed76906c9a6301581c993f71b06d82a52f269f72d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93e45567dd19597317e95fdd123cf2a36ec935dd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/3dac7cbc26dfe8f1ceb6cbaed76906c9a6301581c993f71b06d82a52f269f72d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09b1ed5cb067266a0615b771d9dc8eae41678040e86454a5226f5f6a30d84dce +size 55114 diff --git a/data/2025/2504_10xxx/2504.10481/images/4f10bb715cd101b2ec27be50e1ac22a3276437b0b6ed62ae6bc588de54844429.jpg b/data/2025/2504_10xxx/2504.10481/images/4f10bb715cd101b2ec27be50e1ac22a3276437b0b6ed62ae6bc588de54844429.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ecdc88f412f3894204fa283a86e3cbe6f4820355 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/4f10bb715cd101b2ec27be50e1ac22a3276437b0b6ed62ae6bc588de54844429.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27f5034a480da1676c2851b94fe8ce121bf850c6e9f3de7abd0035ab30902d83 +size 4807 diff --git a/data/2025/2504_10xxx/2504.10481/images/66b6457509766ada8a1bb0ea03cd9f4f8d43c958b3a107ea849da0475400826c.jpg 
b/data/2025/2504_10xxx/2504.10481/images/66b6457509766ada8a1bb0ea03cd9f4f8d43c958b3a107ea849da0475400826c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b62f49518ad2b901844eb9e13818de1511ebf9dc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/66b6457509766ada8a1bb0ea03cd9f4f8d43c958b3a107ea849da0475400826c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:feb2af886ec92703802c8c4d9e4d382897bed131c809c5cb1bb0af4dd13532a1 +size 98512 diff --git a/data/2025/2504_10xxx/2504.10481/images/6a5b1b67cfb7ccb598fc6542b553d7a1537f4ffe9a234815ba02d2db50f43191.jpg b/data/2025/2504_10xxx/2504.10481/images/6a5b1b67cfb7ccb598fc6542b553d7a1537f4ffe9a234815ba02d2db50f43191.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68f5e5a0eced9d8c3d084cff1bccad24bb298235 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/6a5b1b67cfb7ccb598fc6542b553d7a1537f4ffe9a234815ba02d2db50f43191.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ec4ef3bb6c5618259971aebe72c2053b40048de2b9a7cdc24eb8b27dfb5806f +size 4896 diff --git a/data/2025/2504_10xxx/2504.10481/images/6d439d7f1c4119a53063123e4a1a272a2937ccfe8b265eebde7d815d650c02cb.jpg b/data/2025/2504_10xxx/2504.10481/images/6d439d7f1c4119a53063123e4a1a272a2937ccfe8b265eebde7d815d650c02cb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..babc4c4394b9ae8e98f44f590cf0683130b4d88c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/6d439d7f1c4119a53063123e4a1a272a2937ccfe8b265eebde7d815d650c02cb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0fbecb0914c70d55981c9bf8f7a0be5d7d59d71653848b171d5093ba49bcf8eb +size 120054 diff --git a/data/2025/2504_10xxx/2504.10481/images/73a9453381a215760714368ac5cb85025f3e7bd128dfccae5838a59ffcaadddf.jpg b/data/2025/2504_10xxx/2504.10481/images/73a9453381a215760714368ac5cb85025f3e7bd128dfccae5838a59ffcaadddf.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..197ff2c45c4dde4c6cd7397b2d16a5b131c5206f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/73a9453381a215760714368ac5cb85025f3e7bd128dfccae5838a59ffcaadddf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:284b5f5487057add2e6e5aa573f8b3c602cb0d72ea0e33d2e2809b36ed8d7f36 +size 53912 diff --git a/data/2025/2504_10xxx/2504.10481/images/7f8043c5633395c2adfc55d0b319e11b7f5bbfb48b2a9d7af5961c8ed362baa1.jpg b/data/2025/2504_10xxx/2504.10481/images/7f8043c5633395c2adfc55d0b319e11b7f5bbfb48b2a9d7af5961c8ed362baa1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fdd584623921a9e417d83edb2c4b7d1b5738452f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/7f8043c5633395c2adfc55d0b319e11b7f5bbfb48b2a9d7af5961c8ed362baa1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44a68da8e6f16f05eea0b1ffe2914419c639e462414d24cd477a69183983aee9 +size 15880 diff --git a/data/2025/2504_10xxx/2504.10481/images/896e2b96f77717b8e0c38a400cdd6dd74ebd302e370ea750aa771547dddedf3e.jpg b/data/2025/2504_10xxx/2504.10481/images/896e2b96f77717b8e0c38a400cdd6dd74ebd302e370ea750aa771547dddedf3e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62167d0bff0649d252416d96f82654d4249404bf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/896e2b96f77717b8e0c38a400cdd6dd74ebd302e370ea750aa771547dddedf3e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ba65e44b75f4e058c89e065a1ceb3fb3e55ce17487e762543f15a0df35d8d60 +size 31751 diff --git a/data/2025/2504_10xxx/2504.10481/images/90a45066233cbc451c141303d4e374d74093bc3381684a463af0d7f9ab2afa0a.jpg b/data/2025/2504_10xxx/2504.10481/images/90a45066233cbc451c141303d4e374d74093bc3381684a463af0d7f9ab2afa0a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..614c6a83a49c894e6ad7866fe2d04fb6d46dfeea --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10481/images/90a45066233cbc451c141303d4e374d74093bc3381684a463af0d7f9ab2afa0a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dda6a26d677bc01d37e21d679dde694b5afc395fd3327f4a0bd787155d0aa6c2 +size 11386 diff --git a/data/2025/2504_10xxx/2504.10481/images/9692347bd77c26f19e069abddee6ccb60a7779865398c8c3609ace614cb755fb.jpg b/data/2025/2504_10xxx/2504.10481/images/9692347bd77c26f19e069abddee6ccb60a7779865398c8c3609ace614cb755fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bf288b2fe4cc432f9f9259d6062d02d7745be759 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/9692347bd77c26f19e069abddee6ccb60a7779865398c8c3609ace614cb755fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afe06f0e0dc5d002550a4d61bf37f56b1e8ecdabf5973970812f8f00e1b715d7 +size 185577 diff --git a/data/2025/2504_10xxx/2504.10481/images/ab208174476212e4137c9a301bd0ee711411191248932f47f50bc7023c55d50b.jpg b/data/2025/2504_10xxx/2504.10481/images/ab208174476212e4137c9a301bd0ee711411191248932f47f50bc7023c55d50b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7baa838aad764a8dd4ee36afd276809968e2def5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/ab208174476212e4137c9a301bd0ee711411191248932f47f50bc7023c55d50b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd5bdc965b153cf0631a2fde8b555ce638b9743c3e37a030f88cea54b9f21210 +size 123186 diff --git a/data/2025/2504_10xxx/2504.10481/images/ace9afe870fb516fba4a36b235780757dc31427ac11e60fe7a7dd45ba0ad352f.jpg b/data/2025/2504_10xxx/2504.10481/images/ace9afe870fb516fba4a36b235780757dc31427ac11e60fe7a7dd45ba0ad352f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cb9a92967e77f038e3768a889722f778d8031935 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/ace9afe870fb516fba4a36b235780757dc31427ac11e60fe7a7dd45ba0ad352f.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:86a78e3e98d9f741f92acdf1519996f45e796da49a52703c56420d7aec695ac4 +size 168712 diff --git a/data/2025/2504_10xxx/2504.10481/images/af8a758a9ce34e27bf2d1dc172a720e7d29f1449eb7ef7443c3396698c925eb8.jpg b/data/2025/2504_10xxx/2504.10481/images/af8a758a9ce34e27bf2d1dc172a720e7d29f1449eb7ef7443c3396698c925eb8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fa63ec68cd6ebf86c1ae759ded6b3a7b57340c2e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/af8a758a9ce34e27bf2d1dc172a720e7d29f1449eb7ef7443c3396698c925eb8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67a3090fd60f9c7974beba8c666ad4703227b2b45efbc66806bf5bf0cea817cd +size 135613 diff --git a/data/2025/2504_10xxx/2504.10481/images/b0c4cb4845ea9f03c0724fd183e74ebd191cb72e12c445bf83a4897206519880.jpg b/data/2025/2504_10xxx/2504.10481/images/b0c4cb4845ea9f03c0724fd183e74ebd191cb72e12c445bf83a4897206519880.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5355e3ffc76b94ae17fa655aaa579cb3fdaf3edc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/b0c4cb4845ea9f03c0724fd183e74ebd191cb72e12c445bf83a4897206519880.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ad074e5c0d9a8519716e7fd7bb447c6574770ffdd1aaa015c835675fe44ca21 +size 50442 diff --git a/data/2025/2504_10xxx/2504.10481/images/b1acc2ec410f8786487d62af1a5b00c560cbb70f19fd5ef0c2cc2a8b235279f6.jpg b/data/2025/2504_10xxx/2504.10481/images/b1acc2ec410f8786487d62af1a5b00c560cbb70f19fd5ef0c2cc2a8b235279f6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..22c53780d0a0fcdeeb0ed00828d3a60516e87f5c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/b1acc2ec410f8786487d62af1a5b00c560cbb70f19fd5ef0c2cc2a8b235279f6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c41221eed025863d92cfb08a4fce75a76c23353ad69d1d431d215ea627d65fb8 +size 23558 diff --git 
a/data/2025/2504_10xxx/2504.10481/images/b874d9a606f5f4970d4e4fa06d18db6b75c18a4b5aee064087142d606db39ced.jpg b/data/2025/2504_10xxx/2504.10481/images/b874d9a606f5f4970d4e4fa06d18db6b75c18a4b5aee064087142d606db39ced.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cbace019ff8c0891e82cff20ef8bd324cd49d9a4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/b874d9a606f5f4970d4e4fa06d18db6b75c18a4b5aee064087142d606db39ced.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed9987b49ff8ab5d9dd19bd37abb6ae3ca705835e448c1d5db419dca681b45bd +size 9165 diff --git a/data/2025/2504_10xxx/2504.10481/images/c2fad44e3447682f07ed7b3b720c84c552b7a4e0bd7f0a0ad060aca491fe686e.jpg b/data/2025/2504_10xxx/2504.10481/images/c2fad44e3447682f07ed7b3b720c84c552b7a4e0bd7f0a0ad060aca491fe686e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c68e4c4b1ebd999e89afe5d2be99397bd31c5cef --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/c2fad44e3447682f07ed7b3b720c84c552b7a4e0bd7f0a0ad060aca491fe686e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2aa54984f71a667358341e2a312e0031159d0e11c83630e4fe0087e6b7e8d920 +size 25692 diff --git a/data/2025/2504_10xxx/2504.10481/images/c517511348593103d11d7b4b5735a2b47eb6ee73d4973baed5ac68d9f8ac00a3.jpg b/data/2025/2504_10xxx/2504.10481/images/c517511348593103d11d7b4b5735a2b47eb6ee73d4973baed5ac68d9f8ac00a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1da3c82f6ac27d09b94009cd69a928797be1ef3a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/c517511348593103d11d7b4b5735a2b47eb6ee73d4973baed5ac68d9f8ac00a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27fedca32ea8525a596e0d63dc6507f89610c2b2e6e7698d348d9b9f60c89af7 +size 14343 diff --git a/data/2025/2504_10xxx/2504.10481/images/c7b64161e6bad9b6a55e1d24b6081bbc80d1590623a46a0a5887f61e520e322d.jpg 
b/data/2025/2504_10xxx/2504.10481/images/c7b64161e6bad9b6a55e1d24b6081bbc80d1590623a46a0a5887f61e520e322d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ac49ae1ba34f652b16916dc9e2485434fab39fe1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/c7b64161e6bad9b6a55e1d24b6081bbc80d1590623a46a0a5887f61e520e322d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:547ab259547dcb312c9c35e34b9cc20f705c7d2df16278311677da5b248f3c5f +size 87881 diff --git a/data/2025/2504_10xxx/2504.10481/images/cf52ca7a8e9947bb233c95a2f5bc49cc7865aafc0cf0407b1f0d6d1066f28b29.jpg b/data/2025/2504_10xxx/2504.10481/images/cf52ca7a8e9947bb233c95a2f5bc49cc7865aafc0cf0407b1f0d6d1066f28b29.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6c8c619be11cf53028acca60a20d60aa089827a6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/cf52ca7a8e9947bb233c95a2f5bc49cc7865aafc0cf0407b1f0d6d1066f28b29.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8ad0baee5600d5380cb2c3e1ae6e80f8e292af6e3a666ee4e6a732fd3de6592 +size 11688 diff --git a/data/2025/2504_10xxx/2504.10481/images/e9b4dd3cde946e173686828b3110179c284836bb8a3a8b4e6ad063eb9ec3c465.jpg b/data/2025/2504_10xxx/2504.10481/images/e9b4dd3cde946e173686828b3110179c284836bb8a3a8b4e6ad063eb9ec3c465.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5918e192febdcf419bd18bc80fc8d5bfe96072d8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/e9b4dd3cde946e173686828b3110179c284836bb8a3a8b4e6ad063eb9ec3c465.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7d720cd01cce754c488c2dbc2a537f0dc8cadb75a9c30fc393fbe69dfbd5ff8 +size 27865 diff --git a/data/2025/2504_10xxx/2504.10481/images/f0f39aa49410fc0e741f63585bbd3a48e8a9c8fdfc5d987e2b79d4ffef1da1bd.jpg b/data/2025/2504_10xxx/2504.10481/images/f0f39aa49410fc0e741f63585bbd3a48e8a9c8fdfc5d987e2b79d4ffef1da1bd.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..749465c7c06e4fd79098824e58c3f455e77bf5e9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/f0f39aa49410fc0e741f63585bbd3a48e8a9c8fdfc5d987e2b79d4ffef1da1bd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e208ff0102ec0f2575fa08b5280d5c01562acb65ff68a22b839e6ab17cd41273 +size 23290 diff --git a/data/2025/2504_10xxx/2504.10481/images/f5727c2e469bab12fce1b06ec5d22b6afe087d69e01d8fc5aaf17bfaa9fdf2fe.jpg b/data/2025/2504_10xxx/2504.10481/images/f5727c2e469bab12fce1b06ec5d22b6afe087d69e01d8fc5aaf17bfaa9fdf2fe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7a698390f66d0998302bc677d046a586bbc250a0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/f5727c2e469bab12fce1b06ec5d22b6afe087d69e01d8fc5aaf17bfaa9fdf2fe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a792d31e65d54f7e87e9fc46703362d3463081fbc25f8e5f5ee3286c869b0f59 +size 67028 diff --git a/data/2025/2504_10xxx/2504.10481/images/f9faf40a1f834a9f752075ed85d7300770dd78771941ebd3aaf9e3b87f121ceb.jpg b/data/2025/2504_10xxx/2504.10481/images/f9faf40a1f834a9f752075ed85d7300770dd78771941ebd3aaf9e3b87f121ceb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..982327578efbb4d72b9a07b291f8804663841fd7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/f9faf40a1f834a9f752075ed85d7300770dd78771941ebd3aaf9e3b87f121ceb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fa189f056d5e9189677fb4d48069e8e9eae079093b58f5b35cb2b907079e658 +size 185014 diff --git a/data/2025/2504_10xxx/2504.10481/images/fc0db9d96b61c24a2bb9520c503fadc3531bfe8b2e6d23d36abedd249b486d06.jpg b/data/2025/2504_10xxx/2504.10481/images/fc0db9d96b61c24a2bb9520c503fadc3531bfe8b2e6d23d36abedd249b486d06.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e427394710c98f84c9c011426790c4388b2b09aa --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10481/images/fc0db9d96b61c24a2bb9520c503fadc3531bfe8b2e6d23d36abedd249b486d06.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0fe39c0c5895a36ae1e67434ebb46b6d6abf38c181f5ab9806688e690a97bfee +size 132513 diff --git a/data/2025/2504_10xxx/2504.10481/images/fd11d917852a50665391d27d111940fb6ecebf798a934d5a7eff7d7d0412bf43.jpg b/data/2025/2504_10xxx/2504.10481/images/fd11d917852a50665391d27d111940fb6ecebf798a934d5a7eff7d7d0412bf43.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0f33c9326b1bc185a0b364555e349bca913f4f6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/fd11d917852a50665391d27d111940fb6ecebf798a934d5a7eff7d7d0412bf43.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76de197b682c4c6c8ca48cbfd67032b9e4631e87dcd6ca7d678422840c624e92 +size 58163 diff --git a/data/2025/2504_10xxx/2504.10481/images/fe6fa443c497f26a8ddbc810733dddf78eeec3fa5aadf72694d2815fa363a742.jpg b/data/2025/2504_10xxx/2504.10481/images/fe6fa443c497f26a8ddbc810733dddf78eeec3fa5aadf72694d2815fa363a742.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b0b90473d499d03ca32cbb868f82323843fc19d6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/images/fe6fa443c497f26a8ddbc810733dddf78eeec3fa5aadf72694d2815fa363a742.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1da28066892eb845466b5519afb550f1428c327f1899e5ca6932bdff36015fdc +size 137393 diff --git a/data/2025/2504_10xxx/2504.10481/layout.json b/data/2025/2504_10xxx/2504.10481/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..f71a5ea91faddf5ac89f80d9c524eb3792149177 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10481/layout.json @@ -0,0 +1,17336 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 160, + 179, + 453, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 179, + 453, + 194 + ], + "spans": [ + { + 
"bbox": [ + 160, + 179, + 453, + 194 + ], + "type": "text", + "content": "Ding Chen" + }, + { + "bbox": [ + 160, + 179, + 453, + 194 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 160, + 179, + 453, + 194 + ], + "type": "text", + "content": " Qingchen Yu" + }, + { + "bbox": [ + 160, + 179, + 453, + 194 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 160, + 179, + 453, + 194 + ], + "type": "text", + "content": " Pengyuan Wang" + }, + { + "bbox": [ + 160, + 179, + 453, + 194 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 160, + 179, + 453, + 194 + ], + "type": "text", + "content": " Wentao Zhang" + }, + { + "bbox": [ + 160, + 179, + 453, + 194 + ], + "type": "inline_equation", + "content": "^{3\\dagger}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 151, + 208, + 461, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 208, + 461, + 221 + ], + "spans": [ + { + "bbox": [ + 151, + 208, + 461, + 221 + ], + "type": "text", + "content": "Bo Tang² Feiyu Xiong² Xinchi Li¹ Minchuan Yang¹ Zhiyu Li²†" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 196, + 231, + 414, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 231, + 414, + 243 + ], + "spans": [ + { + "bbox": [ + 196, + 231, + 414, + 243 + ], + "type": "text", + "content": "1 Research Institute of China Telecom, Beijing, China" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 210, + 243, + 400, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 210, + 243, + 400, + 255 + ], + "spans": [ + { + "bbox": [ + 210, + 243, + 400, + 255 + ], + "type": "text", + "content": "2 MemTensor (Shanghai) Technology Co., Ltd." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 200, + 255, + 411, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 255, + 411, + 277 + ], + "spans": [ + { + "bbox": [ + 200, + 255, + 411, + 277 + ], + "type": "text", + "content": "3 Center for Data Science, Peking University wentao.zhang@pku.edu.cn, lizy@iaar.ac.cn" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 281, + 306, + 329, + 318 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 306, + 329, + 318 + ], + "spans": [ + { + "bbox": [ + 281, + 306, + 329, + 318 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 331, + 470, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 331, + 470, + 561 + ], + "spans": [ + { + "bbox": [ + 140, + 331, + 470, + 561 + ], + "type": "text", + "content": "With the release of the o1 model by OpenAI, reasoning models adopting slow thinking strategies have gradually emerged. As the responses generated by such models often include complex reasoning, intermediate steps, and self-reflection, existing evaluation methods are often inadequate. They struggle to determine whether the LLM output is truly equivalent to the reference answer, and also have difficulty identifying and extracting the final answer from long, complex responses. To address this issue, we propose xVerify, an efficient answer verifier for reasoning model evaluations. xVerify demonstrates strong capability in equivalence judgment, enabling it to effectively determine whether the answers produced by reasoning models are equivalent to reference answers across various types of objective questions. To train and evaluate xVerify, we construct the VAR dataset by collecting question-answer pairs generated by multiple LLMs across various datasets, leveraging multiple reasoning models and challenging evaluation sets designed specifically for reasoning model assessment. 
A multi-round annotation process is employed to ensure label accuracy. Based on the VAR dataset, we train multiple xVerify models of different scales. In evaluation experiments conducted on both the test set and generalization set, all xVerify models achieve overall F1 scores and accuracy exceeding " + }, + { + "bbox": [ + 140, + 331, + 470, + 561 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 140, + 331, + 470, + 561 + ], + "type": "text", + "content": ". Notably, the smallest variant, xVerify-0.5B-I, outperforms all evaluation methods except GPT-4o, while xVerify-3B-Ib surpasses GPT-4o in overall performance. These results validate the effectiveness and generalizability of xVerify. All resources for xVerify are available at https://github.com/IAAR-Shanghai/xVerify." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 582, + 192, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 582, + 192, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 192, + 594 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 606, + 506, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 606, + 506, + 694 + ], + "spans": [ + { + "bbox": [ + 104, + 606, + 506, + 694 + ], + "type": "text", + "content": "With the emergence of chain of thought (CoT) prompting [35], researchers began to explicitly encourage LLMs to generate intermediate reasoning steps, thereby enhancing their ability to handle complex tasks. Following this, OpenAI introduced the o1 model [15], which proposed the concepts of slow thinking and scaling at test time. Specifically, the model is trained to output a detailed reasoning process before generating a final answer, significantly improving its performance on complex tasks. 
Inspired by this paradigm, a variety of reasoning models have emerged, such as DeepSeek-R1 [3] trained with GRPO, OpenAI's o3-mini [26], and QwQ-32B [31]. However, the rise of reasoning models poses substantial challenges for evaluation. Since the outputs of these models often contain" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.10481v1 [cs.CL] 14 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 99, + 135, + 119 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 99, + 135, + 119 + ], + "spans": [ + { + "bbox": [ + 116, + 99, + 135, + 119 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 137, + 102, + 494, + 140 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 102, + 494, + 140 + ], + "spans": [ + { + "bbox": [ + 137, + 102, + 494, + 140 + ], + "type": "text", + "content": "xVerify: Efficient Answer Verifier for Reasoning Model Evaluations" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 116, + 701, + 285, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 701, + 285, + 712 + ], + "spans": [ + { + "bbox": [ + 116, + 701, + 285, + 712 + ], + "type": "text", + "content": "*Equal contribution. † Corresponding authors" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 731, + 192, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 731, + 192, + 742 + ], + "spans": [ + { + "bbox": [ + 105, + 731, + 192, + 742 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "lengthy reasoning processes—potentially including redundant information, intermediate results, and even self-contradictions—it becomes significantly more difficult for evaluation tools to extract the final answer from such responses [2]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 110, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 266 + ], + "type": "text", + "content": "Developing evaluation methods tailored for LLM responses involving complex reasoning has become a key research focus. LLM reasoning is typically categorized into commonsense, logical, multihop, and mathematical reasoning [8]. Existing evaluation methods fall into automatic and human evaluation [2], with automatic evaluation gaining prominence due to its scalability and lower cost. The main automatic approaches for evaluating reasoning models include rule-based evaluation frameworks [13, 5, 27, 9, 25] and LLM-based judgment methods [20, 7, 18]. However, both approaches face limitations in reasoning model evaluation. Rule-based frameworks often struggle to extract final answers from lengthy reasoning traces, rely on strict formatting (e.g., syntactically correct LaTeX), and typically ignore the reasoning process itself—an oversimplification challenged by many researchers [36, 33, 14, 32]. Judge models are usually not optimized for reasoning evaluation and mainly produce qualitative scores or comments [7], making them more suitable for subjective questions. Objective tasks, in contrast, require accurate binary classification. 
Currently, effective automatic methods specifically designed for evaluating reasoning on objective questions remain lacking." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 269, + 506, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 269, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 104, + 269, + 506, + 423 + ], + "type": "text", + "content": "To address these challenges, we introduce xVerify, an efficient LLM-answer verifier tailored for evaluating LLM responses to objective questions. xVerify processes the full LLM output, enabling it to accurately identify final answers from complex reasoning traces. It also supports robust equivalence checking, including symbol conversion (e.g., 'alpha' " + }, + { + "bbox": [ + 104, + 269, + 506, + 423 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 269, + 506, + 423 + ], + "type": "text", + "content": " 'α'), mathematical expression matching, and semantic alignment in natural language. Moreover, it is tolerant of formatting errors such as malformed LaTeX, making it applicable to a wide range of tasks, including math problems, multiple-choice, short-answer, and classification questions. To train and evaluate xVerify, we construct the Verify Answer for Reasoning (VAR) dataset, which includes responses from 19 LLMs across 24 reasoning benchmarks. All labels are verified through multi-round GPT-4o and human review. The dataset covers advanced reasoning models and benchmarks like GPQA, LiveMathBench, and AIME 2024. We fine-tune xVerify on a variety of base models (e.g., Qwen2.5, LLaMA, Gemma 2) and scales (0.5B-32B). 
Remarkably, even the smallest variant (xVerify-0.5B-I) surpasses existing evaluation methods—including 32B-sized models—on all metrics, while larger variants achieve F1 and accuracy over " + }, + { + "bbox": [ + 104, + 269, + 506, + 423 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 104, + 269, + 506, + 423 + ], + "type": "text", + "content": " on both test and generalization sets." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 427, + 364, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 427, + 364, + 439 + ], + "spans": [ + { + "bbox": [ + 105, + 427, + 364, + 439 + ], + "type": "text", + "content": "The main contributions of this paper are summarized as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 445, + 504, + 540 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 132, + 445, + 504, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 445, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 132, + 445, + 504, + 479 + ], + "type": "text", + "content": "- We construct the VAR dataset, which contains answer samples from 19 LLMs across 24 evaluation benchmarks. The dataset is annotated via multiple rounds of GPT-4o and human review, and is designed for training and evaluating judge models for reasoning tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 482, + 504, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 482, + 504, + 505 + ], + "spans": [ + { + "bbox": [ + 132, + 482, + 504, + 505 + ], + "type": "text", + "content": "- We propose xVerify, an efficient answer verifier for reasoning model evaluations, and release multiple fine-tuned versions of xVerify. The checkpoints are publicly available2." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 506, + 504, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 506, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 132, + 506, + 504, + 540 + ], + "type": "text", + "content": "- We conduct comprehensive comparative evaluations against multiple existing evaluation frameworks and judge models on both test and generalization datasets, thoroughly validating the effectiveness and applicability of xVerify." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 555, + 197, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 197, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 197, + 567 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 578, + 506, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 506, + 678 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 506, + 678 + ], + "type": "text", + "content": "Evaluation methods have always been a crucial component in the development of LLM [2]. However, the open-ended nature of LLM outputs makes it difficult to apply standardized metrics, limiting the effectiveness of traditional evaluation methods [20]. The rise of reasoning models [26, 3, 31], which often generate lengthy and complex reasoning, further complicates evaluation. For objective tasks, the main challenge is to accurately extract the final answer from the LLM's semi-structured output and compare it with the reference answer. Existing approaches are typically divided into human evaluation and automatic evaluation. While human evaluation offers flexibility, automatic methods are more cost-efficient and consistent [2]. Current automatic methods mainly include rule-based evaluation frameworks and LLM-based judgment methods." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 682, + 506, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 682, + 506, + 705 + ], + "spans": [ + { + "bbox": [ + 104, + 682, + 506, + 705 + ], + "type": "text", + "content": "Rule-based methods are widely used in automatic evaluation frameworks such as LM Eval Harness [5], OpenCompass [27], UltraEval [9], and OpenAI Evalu [25]. Tools like Math-Verify [13] also follow" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 117, + 710, + 432, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 710, + 432, + 723 + ], + "spans": [ + { + "bbox": [ + 117, + 710, + 432, + 723 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 117, + 710, + 432, + 723 + ], + "type": "text", + "content": "Hugging Face collections: https://huggingface.co/collections/IAAR-Shanghai/xverify" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 216 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 216 + ], + "type": "text", + "content": "this approach, extracting final answers using regular expressions (RegEx) and comparing them with reference answers. However, LLM outputs often contain final answers in varied surface forms—e.g., \"alpha\" vs. \"α\", \"A\" vs. \"a\", or \"1000\" vs. \"10³\"—which can be semantically equivalent but textually different. 
While some tools support limited transformations, they typically handle only LaTeX expressions or simple string patterns, and struggle with basic semantic equivalence like \"one hundred\" vs. \"100\". For reasoning models, the output is usually lengthy and involves complex reasoning steps with intermediate results. This makes it difficult for regular expressions to accurately identify the final answer, causing rule-based approaches to frequently fail in such contexts. Moreover, prior work has shown that LLMs may revise or overturn their initial predictions during extended reasoning processes, exhibiting a kind of self-reflection [32]. At the same time, rule-based methods typically ignore the reasoning process and only evaluate the final answer, which has drawn criticism from many researchers—especially in the context of reasoning models [36, 33, 14]. Thus, rule-based evaluations have limited applicability in reasoning scenarios." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 220, + 506, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 506, + 385 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 506, + 385 + ], + "type": "text", + "content": "LLM-based judgment methods use fine-tuned LLMs to evaluate the quality of other LLMs' responses. Compared to traditional evaluation methods, they offer greater task adaptability, generate interpretable results, reduce evaluation costs, and can be applied across the LLM lifecycle [20, 7, 18]. For objective questions, these judge models can extract final answers from responses with intermediate reasoning or self-reflection. In recent years, many LLM-based judge models have emerged, including JudgeLM [39], PandaLM [34], Auto-J [21], Prometheus 2 [17], CompassJudger [1], CritiqueLLM [16], and Themis [12]. Judge models typically support pointwise, pairwise, and listwise evaluations [20], and some also serve as reward models in reinforcement learning. 
However, most are designed to assign scores to LLM outputs, making them more suitable for subjective evaluations like helpfulness, reliability, or relevance. For objective questions that require binary decisions (\"correct\" or \"incorrect\"), these models are less effective. Although scores can be binarized using thresholds, this approach is unreliable, as the models are not explicitly trained for such tasks. Moreover, the current LLM-based critic models and PRMs (Process Reward Models) exhibit subpar performance when detecting errors in long chain-of-thought responses generated by reasoning models [10]. Thus, while judge model holds promise for evaluating reasoning models, they require targeted training." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 389, + 506, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 389, + 506, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 389, + 506, + 434 + ], + "type": "text", + "content": "In summary, automatic evaluation on objective tasks remains underdeveloped. Rule-based and LLM-based methods each have clear limitations, while human annotation is costly and hard to scale. To address these challenges, we propose xVerify, a robust and targeted judge model specifically designed for objective evaluation of LLMs." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 449, + 224, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 449, + 224, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 224, + 460 + ], + "type": "text", + "content": "3 Problem Definition" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 474, + 504, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 474, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 474, + 504, + 506 + ], + "type": "text", + "content": "To evaluate the correctness of LLM responses to objective questions, the key is to extract the final answer from the response and compare it with the reference answer. We formally define this evaluation task as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "spans": [ + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "type": "text", + "content": "We formalize this task as a 4-tuple " + }, + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "type": "inline_equation", + "content": "(\\mathrm{Q},\\mathrm{R},\\mathrm{A}_{\\mathrm{ref}},\\mathrm{E})" + }, + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "type": "inline_equation", + "content": "\\mathrm{Q} = \\{q_1,q_2,\\dots,q_n\\}" + }, + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "type": "text", + "content": " is the set of questions, " + }, + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "type": "inline_equation", + "content": "\\mathrm{R} = \\{r_1,r_2,\\dots,r_n\\mid r_i = \\mathcal{W}(q_i)\\}" + }, + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "type": "text", + "content": " is the set of responses generated by an LLM " + }, + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "type": "inline_equation", + "content": 
"\\mathcal{W}" + }, + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "type": "inline_equation", + "content": "\\mathrm{A}_{\\mathrm{ref}} = \\{a_{ref}^{1},\\dots,a_{ref}^{n}\\}" + }, + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "type": "text", + "content": " is the set of reference answers, and " + }, + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "type": "inline_equation", + "content": "\\mathrm{E}:\\mathrm{Q}\\times \\mathrm{R}\\times \\mathrm{A}_{\\mathrm{ref}}\\to 0,1" + }, + { + "bbox": [ + 104, + 512, + 506, + 557 + ], + "type": "text", + "content": " is the evaluation function that returns 1 if the response is correct and 0 otherwise." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "type": "text", + "content": "For the stage of extracting the final answer, given a response " + }, + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "type": "text", + "content": " to question " + }, + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "type": "text", + "content": ", which may include intermediate reasoning and multiple candidate answers, we denote the extracted candidates as " + }, + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "type": "inline_equation", + "content": "\\mathrm{A}(r)" + }, + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "type": "text", + "content": ". 
To identify the final answer, we define a scoring function " + }, + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "type": "inline_equation", + "content": "\\mathrm{S} : \\mathrm{A}(r) \\times \\mathrm{Q} \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "type": "text", + "content": " that measures the relevance or suitability of each candidate " + }, + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "type": "inline_equation", + "content": "a \\in \\mathrm{A}(r)" + }, + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 562, + 505, + 617 + ], + "type": "text", + "content": ", and select the final answer using the extraction function:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 247, + 634, + 504, + 651 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 634, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 247, + 634, + 504, + 651 + ], + "type": "interline_equation", + "content": "\\varepsilon (q, r) = \\arg \\max _ {a \\in \\mathrm {A} (r)} \\mathrm {S} (a, q). 
\\tag {1}", + "image_path": "6a5b1b67cfb7ccb598fc6542b553d7a1537f4ffe9a234815ba02d2db50f43191.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "content": "For the equivalence comparison stage, we define an equivalence function " + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "inline_equation", + "content": "\\psi : \\mathrm{A}_{\\mathrm{ref}} \\times \\mathrm{A}_{\\mathrm{final}} \\to \\{0,1\\}" + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "content": " returns 1 if the predicted answer is equivalent to the reference, and 0 otherwise. Since answers may appear in different forms, " + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "content": " integrates results from the following three sub-functions:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "For mathematical expressions, we define a composite normalization function " + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "inline_equation", + "content": "\\Phi_{\\mathrm{norm}}^{\\mathrm{math}} = \\phi_{\\mathrm{err}} \\circ \\phi_{\\mathrm{syn}} \\circ \\phi_{\\mathrm{alg}} \\circ \\phi_{\\mathrm{dim}}" + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": 
[ + 104, + 700, + 504, + 723 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{err}}" + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": " repairs minor syntax errors, " + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{syn}}" + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": " unifies syntactic structures, " + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{alg}}" + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": " performs" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": "algebraic simplification, and " + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{dim}}" + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": " ensures consistency in physical units. 
By transforming expressions into a canonical form, " + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "inline_equation", + "content": "\\Phi_{\\mathrm{norm}}^{math}" + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": " enables reliable equivalence comparison:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 168, + 112, + 505, + 140 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 112, + 505, + 140 + ], + "spans": [ + { + "bbox": [ + 168, + 112, + 505, + 140 + ], + "type": "interline_equation", + "content": "\\psi_ {m a t h} \\left(a _ {r e f} ^ {m a t h}, a _ {f i n a l} ^ {m a t h}\\right) = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} \\Phi_ {\\text {n o r m}} ^ {m a t h} \\left(a _ {r e f} ^ {m a t h}\\right) = \\Phi_ {\\text {n o r m}} ^ {m a t h} \\left(a _ {f i n a l} ^ {m a t h}\\right), \\\\ 0 & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {2}", + "image_path": "cf52ca7a8e9947bb233c95a2f5bc49cc7865aafc0cf0407b1f0d6d1066f28b29.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 153, + 504, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 153, + 504, + 201 + ], + "spans": [ + { + "bbox": [ + 104, + 153, + 504, + 201 + ], + "type": "text", + "content": "For natural language answers, we define a comparison function " + }, + { + "bbox": [ + 104, + 153, + 504, + 201 + ], + "type": "inline_equation", + "content": "\\psi_{\\mathrm{nl}}: \\mathrm{A}_{\\mathrm{ref}}^{\\mathrm{nl}} \\times \\mathrm{A}_{\\mathrm{final}}^{\\mathrm{nl}} \\to \\{0,1\\}" + }, + { + "bbox": [ + 104, + 153, + 504, + 201 + ], + "type": "text", + "content": " to assess semantic equivalence. 
Specifically, we introduce a semantic alignment function " + }, + { + "bbox": [ + 104, + 153, + 504, + 201 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{align}}^{nl}" + }, + { + "bbox": [ + 104, + 153, + 504, + 201 + ], + "type": "text", + "content": " to measure the similarity between two textual answers. The equivalence decision is made by comparing the alignment score with a predefined threshold " + }, + { + "bbox": [ + 104, + 153, + 504, + 201 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 153, + 504, + 201 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 196, + 217, + 505, + 246 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 217, + 505, + 246 + ], + "spans": [ + { + "bbox": [ + 196, + 217, + 505, + 246 + ], + "type": "interline_equation", + "content": "\\psi_ {n l} \\left(a _ {r e f} ^ {n l}, a _ {f i n a l} ^ {n l}\\right) = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} \\phi_ {\\text {a l i g n}} ^ {n l} \\left(a _ {r e f} ^ {n l}, a _ {f i n a l} ^ {n l}\\right) \\geq \\tau , \\\\ 0 & \\text {o t h e r w i s e} \\end{array} \\right. 
\\tag {3}", + "image_path": "b874d9a606f5f4970d4e4fa06d18db6b75c18a4b5aee064087142d606db39ced.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "text", + "content": "For symbolic representations, we define a composite normalization function " + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "inline_equation", + "content": "\\Phi_{\\mathrm{norm}}^{sym} = \\phi_{\\mathrm{uni}} \\circ \\phi_{\\mathrm{font}} \\circ \\phi_{\\mathrm{dom}}" + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "text", + "content": " which unifies symbols by applying " + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{uni}}" + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "text", + "content": " for Unicode normalization, " + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{font}}" + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "text", + "content": " for aligning font styles, and " + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{dom}}" + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "text", + "content": " for domain-specific mappings. 
This produces a standardized form for character-level comparison, and the " + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "inline_equation", + "content": "\\Phi_{\\mathrm{norm}}^{sym}" + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 178, + 320, + 505, + 353 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 320, + 505, + 353 + ], + "spans": [ + { + "bbox": [ + 178, + 320, + 505, + 353 + ], + "type": "interline_equation", + "content": "\\psi_ {s y m} \\left(a _ {r e f} ^ {s y m}, a _ {f i n a l} ^ {s y m}\\right) = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} \\Phi_ {\\text {n o r m}} ^ {s y m} \\left(a _ {r e f} ^ {s y m}\\right) = \\Phi_ {\\text {n o r m}} ^ {s y m} \\left(a _ {f i n a l} ^ {s y m}\\right), \\\\ 0 & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {4}", + "image_path": "90a45066233cbc451c141303d4e374d74093bc3381684a463af0d7f9ab2afa0a.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 364, + 504, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 364, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 104, + 364, + 504, + 388 + ], + "type": "text", + "content": "Based on the above components, we define a unified equivalence function " + }, + { + "bbox": [ + 104, + 364, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 104, + 364, + 504, + 388 + ], + "type": "text", + "content": " to determine whether the final answer " + }, + { + "bbox": [ + 104, + 364, + 504, + 388 + ], + "type": "inline_equation", + "content": "a_{final}" + }, + { + "bbox": [ + 104, + 364, + 504, + 388 + ], + "type": "text", + "content": " matches the reference answer " + }, + { + "bbox": [ + 104, + 364, + 504, + 388 + ], + "type": "inline_equation", + "content": "a_{ref}" + }, + { + "bbox": [ + 104, + 364, + 504, + 388 + ], + 
"type": "text", + "content": " across different modalities. The definition is:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 190, + 403, + 505, + 459 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 403, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 190, + 403, + 505, + 459 + ], + "type": "interline_equation", + "content": "\\psi \\left(a _ {\\text {f i n a l}}, a _ {\\text {r e f}}\\right) = \\left\\{ \\begin{array}{l l} 1, & \\text {i f} \\psi_ {\\text {m a t h}} \\left(a _ {\\text {f i n a l}} ^ {\\text {m a t h}}, a _ {\\text {r e f}} ^ {\\text {m a t h}}\\right) = 1 \\\\ & \\quad \\wedge \\psi_ {\\text {n l}} \\left(a _ {\\text {f i n a l}} ^ {\\text {n l}}, a _ {\\text {r e f}} ^ {\\text {n l}}\\right) = 1 \\\\ & \\quad \\wedge \\psi_ {\\text {s y m}} \\left(a _ {\\text {f i n a l}} ^ {\\text {s y m}}, a _ {\\text {r e f}} ^ {\\text {s y m}}\\right) = 1; \\\\ 0, & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {5}", + "image_path": "159d98ee817e85da35d1027d4375795bab56e66374089e82404d211381bf408d.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 472, + 504, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 508 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 508 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 104, + 472, + 504, + 508 + ], + "type": "inline_equation", + "content": "a_{final}^{math}, a_{final}^{nl}" + }, + { + "bbox": [ + 104, + 472, + 504, + 508 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 472, + 504, + 508 + ], + "type": "inline_equation", + "content": "a_{final}^{sym}" + }, + { + "bbox": [ + 104, + 472, + 504, + 508 + ], + "type": "text", + "content": " represent the mathematical, natural language, and symbolic parts of the final answer, respectively, and similarly for " + }, + { + "bbox": [ + 104, + 472, + 504, + 508 + ], + "type": "inline_equation", + "content": 
"a_{ref}" + }, + { + "bbox": [ + 104, + 472, + 504, + 508 + ], + "type": "text", + "content": ". This allows for equivalence checking in both unimodal and multimodal settings." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 512, + 354, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 512, + 354, + 522 + ], + "spans": [ + { + "bbox": [ + 104, + 512, + 354, + 522 + ], + "type": "text", + "content": "To summarize, the overall evaluation function " + }, + { + "bbox": [ + 104, + 512, + 354, + 522 + ], + "type": "inline_equation", + "content": "\\mathrm{E}" + }, + { + "bbox": [ + 104, + 512, + 354, + 522 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 237, + 542, + 505, + 556 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 542, + 505, + 556 + ], + "spans": [ + { + "bbox": [ + 237, + 542, + 505, + 556 + ], + "type": "interline_equation", + "content": "\\mathrm {E} (q, r, a _ {r e f}) = \\psi (\\varepsilon (q, r), a _ {r e f}) \\tag {6}", + "image_path": "4f10bb715cd101b2ec27be50e1ac22a3276437b0b6ed62ae6bc588de54844429.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 567, + 506, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 567, + 506, + 590 + ], + "spans": [ + { + "bbox": [ + 104, + 567, + 506, + 590 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 567, + 506, + 590 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 567, + 506, + 590 + ], + "type": "text", + "content": " is the objective question, " + }, + { + "bbox": [ + 104, + 567, + 506, + 590 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 567, + 506, + 590 + ], + "type": "text", + "content": " is the response generated by the LLM, and " + }, + { + "bbox": [ + 104, + 567, + 506, + 590 + ], + "type": "inline_equation", + "content": 
"a_{ref}" + }, + { + "bbox": [ + 104, + 567, + 506, + 590 + ], + "type": "text", + "content": " is the corresponding reference answer." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 608, + 194, + 622 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 608, + 194, + 622 + ], + "spans": [ + { + "bbox": [ + 104, + 608, + 194, + 622 + ], + "type": "text", + "content": "4 Methodology" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "content": "The xVerify training and evaluation pipeline includes three main stages: collecting LLM responses, VAR dataset construction, and xVerify judge pipeline (see Figure 1). We first gather question-response pairs from various LLMs across four types of objective questions, including complex, reasoning-intensive examples. To ensure accurate labels, we employ multiple rounds of annotation and rechecking using both GPT-4o and human annotators. We also apply data augmentation to increase the dataset's diversity and complexity. Finally, we train xVerify models of different sizes on the VAR dataset to evaluate long, multi-step answers—cases that are often difficult for existing evaluation methods. Section 4.1 details the dataset construction, and Section 4.2 describes the training process." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 68, + 476, + 282 + ], + "blocks": [ + { + "bbox": [ + 136, + 68, + 476, + 282 + ], + "lines": [ + { + "bbox": [ + 136, + 68, + 476, + 282 + ], + "spans": [ + { + "bbox": [ + 136, + 68, + 476, + 282 + ], + "type": "image", + "image_path": "6d439d7f1c4119a53063123e4a1a272a2937ccfe8b265eebde7d815d650c02cb.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 285, + 506, + 342 + ], + "lines": [ + { + "bbox": [ + 104, + 285, + 506, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 506, + 342 + ], + "type": "text", + "content": "Figure 1: Framework of xVerify: (1) Collecting LLM Responses: aggregate responses from multiple LLMs across datasets covering four question types. (2) VAR Dataset Construction: employ GPT-4o and human annotators for labeling and rechecking, and use data augmentation to refine the dataset. (3) xVerify Judge Pipeline: accurately evaluate multi-component answers from reasoning models on challenging questions." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 358, + 186, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 358, + 186, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 358, + 186, + 369 + ], + "type": "text", + "content": "4.1 VAR Dataset" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 380, + 504, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 504, + 447 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 504, + 447 + ], + "type": "text", + "content": "xVerify is designed to assess the correctness of reasoning models' responses on objective questions. However, current judge models are mostly trained on tasks such as scoring or reviewing, and reasoning models with lengthy responses have only recently emerged. As a result, there is currently no suitable dataset for training xVerify. To better train and evaluate xVerify, we constructed a dedicated dataset named Verify Answer for Reasoning (VAR). Examples from the VAR dataset are provided in Appendix B.3." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 461, + 254, + 473 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 461, + 254, + 473 + ], + "spans": [ + { + "bbox": [ + 105, + 461, + 254, + 473 + ], + "type": "text", + "content": "4.1.1 LLM Response Generation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 481, + 506, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 481, + 506, + 591 + ], + "spans": [ + { + "bbox": [ + 104, + 481, + 506, + 591 + ], + "type": "text", + "content": "To ensure the diversity and coverage of the dataset, we selected 19 mainstream LLMs and 24 frequently used multilingual datasets to generate and collect responses. 
To better simulate the answering patterns of reasoning models in common evaluation scenarios, the chosen LLMs include recently released models such as the DeepSeek-R1-Distill series [3] and QwQ-32B [31]. Most of the other LLMs also support context lengths exceeding " + }, + { + "bbox": [ + 104, + 481, + 506, + 591 + ], + "type": "inline_equation", + "content": "32k" + }, + { + "bbox": [ + 104, + 481, + 506, + 591 + ], + "type": "text", + "content": " tokens, enabling them to produce answers with extended reasoning chains. The selected datasets include high-difficulty benchmarks commonly used for evaluating reasoning models, such as GPQA [28], AIME 2024 [24], MATH [11], and LiveCodeBench [23], which typically require multi-step reasoning and computation to solve. During data generation, we also retained some extremely long responses, such as those exceeding 6k characters in length. Detailed information on all LLMs and datasets is provided in Appendix A." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 596, + 504, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 504, + 662 + ], + "type": "text", + "content": "To train and evaluate xVerify more effectively, we grouped the 24 datasets into four types based on question and answer formats: multiple choice, math, short answer, and classification. Multiple choice questions offer several labeled options; math includes questions where answers are mathematical expressions (e.g., numbers, equations), including mathematics and physics problems; short answer questions expect brief natural language responses like names or dates, with no strict format constraints; classification tasks involve selecting the correct label, such as for sentiment or topic classification." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "type": "text", + "content": "To reflect realistic evaluation settings and generate a diverse set of Q&A samples, we designed multiple prompt templates for guiding the LLMs in response generation. The prompt configurations vary along several dimensions: 0-shot vs. 5-shot, with or without CoT, and with or without answer format restrictions (restrict), resulting in eight distinct prompt types. Details of all prompt templates are provided in Appendix D.1." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": "In total, we generated 191,600 Q&A samples using the 19 LLMs and 24 evaluation sets, providing a rich and diverse sample pool for constructing the dataset." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 106, + 224, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 224, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 224, + 118 + ], + "type": "text", + "content": "4.1.2 Dataset Partitioning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 125, + 504, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 125, + 504, + 148 + ], + "spans": [ + { + "bbox": [ + 104, + 125, + 504, + 148 + ], + "type": "text", + "content": "Based on the previously collected sample pool, we constructed the training, test, and generalization sets through filtering and preprocessing." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 152, + 504, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 152, + 504, + 197 + ], + "spans": [ + { + "bbox": [ + 104, + 152, + 504, + 197 + ], + "type": "text", + "content": "The training and test sets are used to train and evaluate the xVerify model. Both are sampled from the same pool, sharing similar distributions. Specifically, they include samples generated by 15 LLMs across 17 evaluation sets, covering the four previously mentioned question types. The training set contains 36,941 samples, and the test set includes 5,194 samples." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 201, + 505, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 201, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 201, + 505, + 258 + ], + "type": "text", + "content": "The generalization set complements the test set by evaluating xVerify's ability to handle more diverse and challenging distributions, reflecting real-world scenarios. It consists of 5,366 samples from 7 evaluation sets not used in the training or test sets, while still spanning all four question types. 
These samples are generated by 19 LLMs, including 4 models not seen in training or testing, such as the reasoning model QwQ-32B, resulting in greater diversity and distribution shift." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 261, + 504, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 261, + 504, + 285 + ], + "spans": [ + { + "bbox": [ + 104, + 261, + 504, + 285 + ], + "type": "text", + "content": "Section 4.1.4 introduces our data augmentation strategy, which adds more challenging samples to all three sets. Detailed dataset statistics are provided in Appendix B.1." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 295, + 213, + 306 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 295, + 213, + 306 + ], + "spans": [ + { + "bbox": [ + 105, + 295, + 213, + 306 + ], + "type": "text", + "content": "4.1.3 Data Annotations" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 313, + 506, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 313, + 506, + 402 + ], + "spans": [ + { + "bbox": [ + 104, + 313, + 506, + 402 + ], + "type": "text", + "content": "To ensure the accuracy of xVerify's training and evaluation, we conducted multiple rounds of automatic and manual annotation across the three datasets. Specifically, we used GPT-4o to perform two rounds of annotation for all samples in the datasets, utilizing two distinct prompt templates (details provided in Appendix D.2) to improve annotation confidence [33, 22]. Given the large size of the training set, we only applied manual annotation to the more challenging math problems and to samples where the two rounds of GPT-4o annotations disagreed. In contrast, for the test and generalization sets, we manually annotated all samples, resulting in a three-round annotation process to maximize label reliability. Details of the manual annotation process are provided in Appendix B.2." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 413, + 221, + 425 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 413, + 221, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 413, + 221, + 425 + ], + "type": "text", + "content": "4.1.4 Data Augmentation" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 138, + 434, + 476, + 586 + ], + "blocks": [ + { + "bbox": [ + 138, + 434, + 476, + 586 + ], + "lines": [ + { + "bbox": [ + 138, + 434, + 476, + 586 + ], + "spans": [ + { + "bbox": [ + 138, + 434, + 476, + 586 + ], + "type": "image", + "image_path": "c7b64161e6bad9b6a55e1d24b6081bbc80d1590623a46a0a5887f61e520e322d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 590, + 504, + 635 + ], + "lines": [ + { + "bbox": [ + 104, + 590, + 504, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 504, + 635 + ], + "type": "text", + "content": "Figure 2: Data Augmentation Pipelines: (1) transformation of multiple-choice options through numbering conversion and noise injection, (2) diversification of mathematical answers via equivalent expression generation, and (3) final answer sentence transformation using prompt rephrasing, symbol wrapping, and gap token insertion." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 639, + 506, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 639, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 506, + 673 + ], + "type": "text", + "content": "To further enhance the diversity and robustness of the dataset, we designed a series of data augmentation strategies (illustrated in Figure 2) to better simulate real-world evaluation settings and improve the model's tolerance to varied answer formats." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "content": "For multiple-choice questions, we applied two types of augmentations: option index transformation and noise injection. The former converts alphabetical labels to Arabic or Roman numerals, while the latter randomly adds or removes irrelevant distractor options without changing the original question intent, thereby increasing structural complexity." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": "For math problems, we used two approaches: augmentation based on reference answers and LLM responses. In the first approach, we generated 3-5 mathematically equivalent expressions of each reference answer through symbolic and formal transformations, then created new samples accordingly. In the second, we applied the same transformation logic to the final answers in LLM responses, enriching the dataset with varied mathematical formats and helping the model learn equivalence across symbolic expressions." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 144, + 506, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 144, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 104, + 144, + 506, + 212 + ], + "type": "text", + "content": "We also augmented the final answer statements. Specifically, we extracted answer-bearing sentences from responses generated using restrict prompts, and applied over 1,000 transformation patterns. These included: 20 variations of prompt rephrasing (e.g., \"The answer is B\" " + }, + { + "bbox": [ + 104, + 144, + 506, + 212 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 144, + 506, + 212 + ], + "type": "text", + "content": " \"The most appropriate answer is B\"), 18 symbolic wrappers (e.g., wrapping B as " + }, + { + "bbox": [ + 104, + 144, + 506, + 212 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 144, + 506, + 212 + ], + "type": "text", + "content": "), and 5 forms of delimiter insertions (e.g., adding a colon or space before the answer). This improved diversity in answer formats and reduced overfitting to specific templates." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 217, + 504, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 217, + 504, + 274 + ], + "spans": [ + { + "bbox": [ + 104, + 217, + 504, + 274 + ], + "type": "text", + "content": "Together, these strategies expanded the expressive space of the dataset while preserving semantic consistency, offering richer and more challenging training signals for xVerify. After augmentation, the sizes of the training, test, and generalization sets increased to 43,204, 6,122, and 6,468 samples respectively. Full dataset details are provided in Appendix B.1. The augmentation of math problems primarily relied on GPT-4o; prompt templates are listed in Appendix D.3." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 287, + 197, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 287, + 197, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 197, + 300 + ], + "type": "text", + "content": "4.2 Model Training" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 308, + 504, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 418 + ], + "type": "text", + "content": "We trained 14 models with different parameter sizes and architectures using the training set from the VAR dataset. Specifically, we utilized the LLaMA-Factory framework [38] and QLoRA technique [4] for model training. Based on extensive experimentation, we set the number of epochs to 1 and selected a learning rate of 1e-4 as the optimal configuration, with other hyperparameters detailed in Appendix C.1. Many researchers have pointed out potential bias in using LLMs as judge models, where models from the same family tend to receive higher ratings [19]. To thoroughly evaluate the generalization capability of the xVerify method, we trained 14 models with varying parameter sizes and architectures. These models ranged from 0.5B to 32B parameters and included five different families, such as LLaMA 3 [6], Qwen2.5 [37], and Gemma 2 [30]. Details of the models used are provided in Appendix C.2." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 436, + 192, + 450 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 436, + 192, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 192, + 450 + ], + "type": "text", + "content": "5 Experiments" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 463, + 504, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 463, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 463, + 504, + 486 + ], + "type": "text", + "content": "In this section, we will present the configuration, results, and detailed analysis of the xVerify model evaluation experiments. First, we will outline the experimental setup:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 497, + 504, + 663 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 132, + 497, + 504, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 497, + 504, + 541 + ], + "spans": [ + { + "bbox": [ + 132, + 497, + 504, + 541 + ], + "type": "text", + "content": "- Datasets: The datasets used in the evaluation experiments are the test set and generalization set from the VAR dataset. The test set is used to evaluate the xVerify model's performance, while the generalization set supplements the test set by simulating real-world scenarios with a broader sample distribution to assess the model's generalization ability." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 547, + 504, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 547, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 132, + 547, + 504, + 581 + ], + "type": "text", + "content": "- Metrics: The evaluation mainly uses accuracy and F1 score on both the test and generalization sets. Accuracy shows the model's overall performance, while the F1 score combines precision and recall for a more complete perspective." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 586, + 504, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 586, + 504, + 663 + ], + "spans": [ + { + "bbox": [ + 132, + 586, + 504, + 663 + ], + "type": "text", + "content": "- Baselines: There are two types of baselines: evaluation frameworks and judge models. The evaluation frameworks include DeepSeek-Math [29], LM Eval Harness [5], Math-Verify [13], OpenAI Evalu [25], OpenCompass [27], and UltraEval [9]. The judge models include PandaLM [34], Auto-J [21], Prometheus 2 [17], JudgeLM [39], and CompassJudger [1]. In addition, GPT-4o is also used as a judge model with two strategies: one with CoT and one without. The prompts for the judge model and xVerify are provided in Appendix D.4 and Appendix D.5." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "content": "Test Set Evaluation Results. We evaluated all evaluation frameworks, judge models, and the xVerify model on the VAR test set (see Table 1). Overall, the xVerify model outperforms all evaluation frameworks and judge models, including GPT-4o, with the best and second-best values in each column appearing for the xVerify model." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 109, + 504, + 321 + ], + "blocks": [ + { + "bbox": [ + 105, + 76, + 504, + 109 + ], + "lines": [ + { + "bbox": [ + 105, + 76, + 504, + 109 + ], + "spans": [ + { + "bbox": [ + 105, + 76, + 504, + 109 + ], + "type": "text", + "content": "Table 1: Evaluation Accuracy Results on the Test Set. \"-\" indicates that the evaluation method is not applicable to the problem type. The best performance in each column will be shown in bold, and the second-best performance will be underlined." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 109, + 504, + 321 + ], + "lines": [ + { + "bbox": [ + 107, + 109, + 504, + 321 + ], + "spans": [ + { + "bbox": [ + 107, + 109, + 504, + 321 + ], + "type": "table", + "html": "
Method TypeMethodMultiple ChoiceMathShort AnswerClassificationOverall
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
Evaluation FrameworkDeepSeek Math Verify70.77%75.17%78.34%84.30%----74.90%52.52%
LM Eval Harness58.44%68.19%25.16%28.27%53.41%44.51%72.35%66.94%47.67%48.32%
Math-Verify5.88%53.76%82.55%86.70%42.27%71.91%0.00%29.66%45.64%65.91%
OpenAI Simple Evals23.61%28.02%66.79%76.88%42.23%55.32%73.29%67.87%51.17%58.10%
OpenCompass68.11%72.52%79.25%84.73%----74.18%79.64%
UltraEval17.34%18.04%8.88%56.89%----13.95%40.71%
Judge ModelPandaLM-7B-v14.26%8.12%16.78%14.46%23.47%17.72%25.32%16.79%16.40%13.72%
Auto-J-Bilingual-6B52.85%67.71%40.76%65.21%67.22%79.60%74.86%71.37%57.04%69.59%
Auto-J-13B40.00%63.20%26.32%60.62%64.41%78.22%86.04%82.60%53.38%68.13%
Prometheus-7B-v2.075.76%75.41%74.20%74.35%70.95%74.59%84.80%77.03%76.50%75.11%
Prometheus-8x7B-v2.071.26%68.61%71.99%66.92%76.24%77.70%83.27%77.65%74.57%71.12%
JudgeLM-7B-v1.056.53%42.57%46.09%34.58%60.33%50.56%83.89%73.22%59.02%45.90%
JudgeLM-13B-v1.056.81%48.89%58.39%59.46%77.32%79.52%95.63%93.82%68.57%65.83%
JudgeLM-33B-v1.042.86%43.24%44.82%46.03%57.86%62.23%73.42%67.56%52.00%51.75%
CompassJudger-1-1.5B49.95%35.54%61.66%48.78%57.36%46.93%82.51%70.96%61.94%48.35%
CompassJudger-1-7B70.05%62.78%66.62%58.86%67.47%65.08%92.99%89.50%72.72%65.96%
CompassJudger-1-14B58.94%44.62%55.09%40.76%59.66%52.90%90.87%86.61%63.22%51.37%
CompassJudger-1-32B95.09%95.37%84.11%84.30%94.95%96.11%98.45%97.84%91.67%91.69%
GPT-4o as Judge96.61%96.75%95.27%95.80%95.01%96.20%98.14%97.43%96.25%96.39%
GPT-4o as Judge (CoT)97.10%97.23%95.41%95.88%95.63%96.63%99.56%99.38%96.85%96.95%
xVerifyxVerify-0.5B-I97.78%97.90%93.74%94.64%96.72%97.49%99.71%99.59%96.69%96.85%
xVerify-3B-Ib97.31%97.41%95.65%96.18%96.38%97.23%99.78%99.69%97.17%97.27%
xVerify-7B-I97.75%97.84%95.94%96.44%96.51%97.32%99.78%99.69%97.41%97.50%
xVerify-9B-I97.43%97.53%95.75%96.27%96.06%96.97%99.78%99.69%97.19%97.29%
xVerify-14B-Ia97.49%97.59%95.73%96.22%95.41%96.46%99.63%99.49%97.06%97.16%
xVerify-32B-I97.81%97.90%95.88%96.31%96.18%97.06%99.71%99.59%97.32%97.40%
", + "image_path": "f9faf40a1f834a9f752075ed85d7300770dd78771941ebd3aaf9e3b87f121ceb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 350, + 506, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 350, + 506, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 350, + 506, + 427 + ], + "type": "text", + "content": "Among the evaluation frameworks, the best performers were DeepSeek Math Verify and OpenCompass, but neither achieved an F1 score nor accuracy exceeding " + }, + { + "bbox": [ + 104, + 350, + 506, + 427 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 104, + 350, + 506, + 427 + ], + "type": "text", + "content": ". Some evaluation frameworks were also not suitable for certain question types, which is an inherent limitation of rule-based methods—strong in specificity but limited in applicability. For instance, OpenCompass was completely unsuitable for short answer and classification questions. Additionally, the long reasoning processes generated by reasoning models made it difficult for evaluation frameworks to extract final answers, lowering their overall performance." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 431, + 506, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 431, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 431, + 506, + 575 + ], + "type": "text", + "content": "Among judge models, GPT-4o and CompassJudger showed the best overall performance. 
The CompassJudger-1-32B model achieved F1 score and accuracy of " + }, + { + "bbox": [ + 104, + 431, + 506, + 575 + ], + "type": "inline_equation", + "content": "91.67\\%" + }, + { + "bbox": [ + 104, + 431, + 506, + 575 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 431, + 506, + 575 + ], + "type": "inline_equation", + "content": "91.69\\%" + }, + { + "bbox": [ + 104, + 431, + 506, + 575 + ], + "type": "text", + "content": ", respectively. However, the model performed poorly on math questions, with both F1 score and accuracy below " + }, + { + "bbox": [ + 104, + 431, + 506, + 575 + ], + "type": "inline_equation", + "content": "85\\%" + }, + { + "bbox": [ + 104, + 431, + 506, + 575 + ], + "type": "text", + "content": ", indicating that it handles simpler questions well but struggles with formula equivalence in math problems. Furthermore, only the 32B version of this judge model achieved over " + }, + { + "bbox": [ + 104, + 431, + 506, + 575 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 104, + 431, + 506, + 575 + ], + "type": "text", + "content": " F1 score and accuracy, while smaller models performed below " + }, + { + "bbox": [ + 104, + 431, + 506, + 575 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 104, + 431, + 506, + 575 + ], + "type": "text", + "content": ". Therefore, the performance of CompassJudger-1-32B is more a result of the base model's capabilities rather than the subsequent training. For example, the smallest xVerify-0.5B-I model outperforms CompassJudger-1-32B across the board, indicating that the VAR training set significantly improves model evaluation performance. GPT-4o's overall performance is very close to xVerify, but the improvement after using CoT is small, with token consumption nearly doubling. 
Specifically, GPT-4o as Judge evaluated the entire test set at a cost of $13.09, while GPT-4o as Judge (CoT) cost $20.15 (using the OpenAI API, charged by token count)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 578, + 507, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 507, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 507, + 647 + ], + "type": "text", + "content": "In contrast, even the smallest xVerify-0.5B-I model outperforms all methods except GPT-4o as Judge (CoT) in overall performance, and the xVerify-3B-Ib model surpasses all others in every evaluation metric. Moreover, for more difficult math questions, all xVerify models except xVerify-0.5B-I exceeded " + }, + { + "bbox": [ + 104, + 578, + 507, + 647 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 104, + 578, + 507, + 647 + ], + "type": "text", + "content": " performance. We also found that the performance of the xVerify model improves as the parameter size increases, but slightly decreases after exceeding 7B parameters, likely due to overfitting on the VAR training set, which is sufficiently large for smaller models." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 667, + 507, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 507, + 723 + ], + "type": "text", + "content": "Generalization Set Evaluation Results. To better assess the performance of xVerify on a broader sample distribution, we evaluated all methods on the VAR generalization set, as shown in Table 2. On the generalization set, the xVerify model showed a slight decrease in overall performance. 
However, the drop in both F1 score and accuracy was less than " + }, + { + "bbox": [ + 104, + 667, + 507, + 723 + ], + "type": "inline_equation", + "content": "1.5\\%" + }, + { + "bbox": [ + 104, + 667, + 507, + 723 + ], + "type": "text", + "content": ", while other methods showed mixed results. Overall, the xVerify model still outperformed all other methods, indicating that although" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "type": "text", + "content": "overfitting exists in xVerify, it is limited and the model maintains strong generalization ability on samples outside the training set distribution." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 107, + 145, + 504, + 357 + ], + "blocks": [ + { + "bbox": [ + 104, + 112, + 504, + 145 + ], + "lines": [ + { + "bbox": [ + 104, + 112, + 504, + 145 + ], + "spans": [ + { + "bbox": [ + 104, + 112, + 504, + 145 + ], + "type": "text", + "content": "Table 2: Evaluation Accuracy Results on the Generalization Set. \"--\" indicates that the evaluation method is not applicable to the problem type. The best performance in each column will be shown in bold, and the second-best performance will be underlined." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 145, + 504, + 357 + ], + "lines": [ + { + "bbox": [ + 107, + 145, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 107, + 145, + 504, + 357 + ], + "type": "table", + "html": "
Method TypeMethodMultiple ChoiceMathShort AnswerClassificationOverall
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
Evaluation FrameworkDeepSeek Math Verify72.90%73.39%11.69%79.83%----60.57%44.42%
LM Eval Harness61.60%65.37%7.03%18.48%58.22%45.09%92.06%88.21%55.81%51.30%
Math-Verify5.19%45.10%64.18%87.68%9.12%52.75%0.00%24.59%16.10%55.53%
OpenAI Simple Evals28.72%29.23%24.31%78.90%58.33%59.58%94.39%91.62%57.99%63.36%
OpenCompass71.64%71.44%47.22%84.39%----65.74%78.18%
UltraEval16.29%15.31%13.55%78.39%----15.71%48.13%
Judge ModelPandaLM-7B-v14.28%7.85%9.91%15.97%45.81%31.43%36.23%25.99%23.74%19.14%
Auto-J-Bilingual-6B52.07%60.75%10.56%74.79%85.16%86.76%84.90%79.91%67.20%74.57%
Auto-J-13B34.87%52.78%9.86%76.54%85.12%86.97%77.67%71.99%60.43%71.35%
Prometheus-7B-v2.076.67%73.66%49.08%71.46%81.52%81.32%79.59%71.92%73.85%74.35%
Prometheus-8x7B-v2.074.13%68.60%49.48%60.27%87.15%86.13%84.70%77.19%74.51%71.69%
JudgeLM-7B-v1.060.22%45.71%12.71%15.40%72.15%62.51%86.11%76.18%59.11%46.38%
JudgeLM-13B-v1.065.39%57.80%21.61%44.87%86.11%84.53%91.78%86.89%69.18%65.63%
JudgeLM-33B-v1.046.99%45.10%20.31%39.99%71.34%66.69%41.92%33.36%46.06%46.01%
CompassJudger-1-1.5B55.75%40.87%34.53%33.62%63.93%51.57%84.49%73.93%60.01%47.65%
CompassJudger-1-7B74.31%65.20%38.27%39.89%88.99%88.15%93.29%89.29%73.47%67.47%
CompassJudger-1-14B63.65%49.50%27.63%21.20%73.61%66.48%88.97%81.92%63.10%51.21%
CompassJudger-1-32B92.93%92.32%72.05%84.91%96.81%96.86%98.05%97.05%91.90%92.04%
GPT-4o as Judge95.86%95.38%87.91%94.76%97.46%97.49%98.67%97.98%96.03%96.18%
GPT-4o as Judge (CoT)95.44%94.88%88.34%94.71%97.39%97.42%98.36%97.52%95.79%95.92%
xVerifyxVerify-0.5B-I96.49%96.10%80.00%91.94%96.95%97.00%99.03%98.53%95.29%95.53%
xVerify-3B-Ib96.21%95.71%86.20%94.15%97.60%97.63%99.03%98.53%96.08%96.23%
xVerify-7B-I96.16%95.66%87.86%94.87%97.45%97.49%98.93%98.37%96.22%96.37%
xVerify-9B-I96.06%95.55%87.47%94.76%97.53%97.56%99.13%98.68%96.23%96.38%
xVerify-14B-Ia96.11%95.60%90.20%95.74%97.32%97.35%99.13%98.68%96.53%96.65%
xVerify-32B-I96.22%95.71%90.09%95.59%97.32%97.35%99.03%98.53%96.50%96.60%
", + "image_path": "9692347bd77c26f19e069abddee6ccb60a7779865398c8c3609ace614cb755fb.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 368, + 506, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 368, + 506, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 368, + 506, + 413 + ], + "type": "text", + "content": "Specifically, the overall F1 score and accuracy of all evaluation frameworks remained below " + }, + { + "bbox": [ + 104, + 368, + 506, + 413 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 104, + 368, + 506, + 413 + ], + "type": "text", + "content": " with only OpenCompass achieving an overall accuracy above " + }, + { + "bbox": [ + 104, + 368, + 506, + 413 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 104, + 368, + 506, + 413 + ], + "type": "text", + "content": ". This indicates that rule-based evaluation frameworks have significant limitations in generalization performance, struggling to effectively handle the diverse answers and evaluation sets from LLMs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 417, + 505, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 417, + 505, + 517 + ], + "spans": [ + { + "bbox": [ + 104, + 417, + 505, + 517 + ], + "type": "text", + "content": "Among the judge models, the best-performing ones remained GPT-4o and CompassJudger. 
However, all judge models except GPT-4o had an F1 score below " + }, + { + "bbox": [ + 104, + 417, + 505, + 517 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 104, + 417, + 505, + 517 + ], + "type": "text", + "content": " on math questions, with most models scoring below " + }, + { + "bbox": [ + 104, + 417, + 505, + 517 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 104, + 417, + 505, + 517 + ], + "type": "text", + "content": ", indicating that judge models almost entirely fail in evaluating more diverse and complex math problems. GPT-4o as Judge and GPT-4o as Judge (CoT) also failed to achieve an F1 score above " + }, + { + "bbox": [ + 104, + 417, + 505, + 517 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 104, + 417, + 505, + 517 + ], + "type": "text", + "content": " on math problems, suggesting that the math samples in the generalization set indeed present challenges for evaluation methods. Furthermore, GPT-4o's performance did not improve after using CoT; instead, it showed a slight decline. This suggests that in broader scenarios, CoT-based prompt engineering methods do not effectively improve GPT-4o's performance as a judge model, and model fine-tuning may be a better option." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 521, + 506, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 521, + 506, + 598 + ], + "spans": [ + { + "bbox": [ + 104, + 521, + 506, + 598 + ], + "type": "text", + "content": "In contrast, the xVerify-0.5B-I model outperformed all evaluation methods except GPT-4o, and the xVerify-3B-Ib model outperformed both CoT-based GPT-4o methods. 
For more difficult math problems, the F1 score and accuracy of the xVerify-14B-Ia and xVerify-32B-I models exceeded " + }, + { + "bbox": [ + 104, + 521, + 506, + 598 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 104, + 521, + 506, + 598 + ], + "type": "text", + "content": ". Additionally, we observed that as the parameter size of the xVerify model increased, the performance drop on the generalization set decreased. For example, the accuracy drop for xVerify-0.5B-I was " + }, + { + "bbox": [ + 104, + 521, + 506, + 598 + ], + "type": "inline_equation", + "content": "1.33\\%" + }, + { + "bbox": [ + 104, + 521, + 506, + 598 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 521, + 506, + 598 + ], + "type": "inline_equation", + "content": "0.91\\%" + }, + { + "bbox": [ + 104, + 521, + 506, + 598 + ], + "type": "text", + "content": " for xVerify-9B-I, and " + }, + { + "bbox": [ + 104, + 521, + 506, + 598 + ], + "type": "inline_equation", + "content": "0.80\\%" + }, + { + "bbox": [ + 104, + 521, + 506, + 598 + ], + "type": "text", + "content": " for xVerify-32B-I, suggesting that larger xVerify models exhibit stronger generalization performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 603, + 506, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 603, + 506, + 658 + ], + "spans": [ + { + "bbox": [ + 104, + 603, + 506, + 658 + ], + "type": "text", + "content": "Furthermore, we comprehensively evaluated the performance of 14 x Verify models on both the test and generalization sets, and tested the computational efficiency of all x Verify and judge models, along with the evaluation cost of GPT-4o as a judge model. The results showed that x Verify models outperform other judge models in both usage cost and evaluation efficiency. Full experimental results can be found in Appendix E." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 674, + 185, + 687 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 674, + 185, + 687 + ], + "spans": [ + { + "bbox": [ + 105, + 674, + 185, + 687 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "In this paper, we propose an efficient answer verifier for reasoning model evaluations, named xVerify, which can effectively assess the correctness of long reasoning responses generated by" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 301, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 172 + ], + "type": "text", + "content": "reasoning models on various difficult objective questions. To train and evaluate the xVerify model, we constructed the VAR dataset based on several popular LLMs and evaluation sets. This dataset primarily collects long reasoning responses generated by reasoning models on challenging questions, and multiple rounds of labeling and verification were conducted using GPT-4o and human annotators. 
Ultimately, we trained multiple xVerify models of varying specifications based on the VAR dataset and performed comparative evaluations with several evaluation frameworks and judge models on both the test and generalization sets. The experimental results show that even the smallest xVerify-0.5B-I model outperforms all methods except GPT-4o, and larger xVerify models surpass all other methods, demonstrating the effectiveness and generalization ability of xVerify." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 186, + 164, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 186, + 164, + 198 + ], + "spans": [ + { + "bbox": [ + 106, + 186, + 164, + 198 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 205, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 111, + 205, + 505, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 205, + 505, + 239 + ], + "spans": [ + { + "bbox": [ + 111, + 205, + 505, + 239 + ], + "type": "text", + "content": "[1] Maosong Cao, Alexander Lam, Haodong Duan, Hongwei Liu, Songyang Zhang, and Kai Chen. Compassjudger-1: All-in-one judge model helps model evaluation and evolution. arXiv preprint arXiv:2410.16256, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 245, + 505, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 245, + 505, + 280 + ], + "spans": [ + { + "bbox": [ + 111, + 245, + 505, + 280 + ], + "type": "text", + "content": "[2] Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, et al. A survey on evaluation of large language models. ACM transactions on intelligent systems and technology, 15(3):1-45, 2024." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 285, + 504, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 285, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 111, + 285, + 504, + 308 + ], + "type": "text", + "content": "[3] DeepSeek-AI, Daya Guo, Dejian Yang, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 315, + 506, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 315, + 506, + 348 + ], + "spans": [ + { + "bbox": [ + 111, + 315, + 506, + 348 + ], + "type": "text", + "content": "[4] Tim Dettmers, Artidoro Pagnoni, Ari Holtzman, and Luke Zettlemoyer. Qlora: Efficient finetuning of quantized llms. Advances in neural information processing systems, 36:10088-10115, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 355, + 505, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 355, + 505, + 411 + ], + "spans": [ + { + "bbox": [ + 111, + 355, + 505, + 411 + ], + "type": "text", + "content": "[5] Leo Gao, Jonathan Tow, Baber Abbasi, Stella Biderman, Sid Black, Anthony DiPofi, Charles Foster, Laurence Golding, Jeffrey Hsu, Alain Le Noac'h, Haonan Li, Kyle McDonell, Niklas Muennighoff, Chris Ociepa, Jason Phang, Laria Reynolds, Hailey Schoelkopf, Aviya Skowron, Lintang Sutawika, Eric Tang, Anish Thite, Ben Wang, Kevin Wang, and Andy Zou. A framework for few-shot language model evaluation, September 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 418, + 505, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 418, + 505, + 430 + ], + "spans": [ + { + "bbox": [ + 111, + 418, + 505, + 430 + ], + "type": "text", + "content": "[6] Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, et al. The llama 3 herd of models, 2024." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 436, + 505, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 436, + 505, + 469 + ], + "spans": [ + { + "bbox": [ + 111, + 436, + 505, + 469 + ], + "type": "text", + "content": "[7] Jiawei Gu, Xuhui Jiang, Zhichao Shi, Hexiang Tan, Xuehao Zhai, Chengjin Xu, Wei Li, Yinghan Shen, Shengjie Ma, Honghao Liu, Saizhuo Wang, Kun Zhang, Yuzhuo Wang, Wen Gao, Lionel Ni, and Jian Guo. A survey on llm-as-a-judge, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 476, + 505, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 476, + 505, + 510 + ], + "spans": [ + { + "bbox": [ + 111, + 476, + 505, + 510 + ], + "type": "text", + "content": "[8] Zishan Guo, Renren Jin, Chuang Liu, Yufei Huang, Dan Shi, Supryadi, Linhao Yu, Yan Liu, Jiaxuan Li, Bojian Xiong, and Deyi Xiong. Evaluating large language models: A comprehensive survey, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 517, + 505, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 517, + 505, + 550 + ], + "spans": [ + { + "bbox": [ + 111, + 517, + 505, + 550 + ], + "type": "text", + "content": "[9] Chaoqun He, Renjie Luo, Shengding Hu, Yuanqian Zhao, Jie Zhou, Hanghao Wu, Jiajie Zhang, Xu Han, Zhiyuan Liu, and Maosong Sun. Ultraeval: A lightweight platform for flexible and comprehensive evaluation for llms. arXiv preprint arXiv:2404.07584, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 556, + 504, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 556, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 107, + 556, + 504, + 590 + ], + "type": "text", + "content": "[10] Yancheng He, Shilong Li, Jiaheng Liu, Weixun Wang, Xingyuan Bu, Ge Zhang, Zhongyuan Peng, Zhaoxiang Zhang, Zhicheng Zheng, Wenbo Su, and Bo Zheng. 
Can large language models detect errors in long chain-of-thought reasoning?, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 597, + 504, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 597, + 504, + 641 + ], + "spans": [ + { + "bbox": [ + 106, + 597, + 504, + 641 + ], + "type": "text", + "content": "[11] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. In J. Vanschoeren and S. Yeung, editors, Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, volume 1, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 648, + 506, + 682 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 648, + 506, + 682 + ], + "spans": [ + { + "bbox": [ + 106, + 648, + 506, + 682 + ], + "type": "text", + "content": "[12] Xinyu Hu, Li Lin, Mingqi Gao, Xunjian Yin, and Xiaojun Wan. Themis: A reference-free nlg evaluation language model with flexibility and interpretability. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 15924-15951, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 689, + 506, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 689, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 107, + 689, + 506, + 721 + ], + "type": "text", + "content": "[13] Greg Gandenberger Hynek Kydlíček. GitHub - huggingface/Math-Verify: A robust mathematical expression evaluation system designed for assessing Large Language Model outputs in mathematical tasks., 2024." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 506, + 139 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 506, + 139 + ], + "type": "text", + "content": "[14] Alon Jacovi, Yonatan Bitton, Bernd Bohnet, Jonathan Herzig, Or Honovich, Michael Tseng, Michael Collins, Roee Aharoni, and Mor Geva. A chain-of-thought is as strong as its weakest link: A benchmark for verifiers of reasoning chains. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 4615–4634, Bangkok, Thailand, August 2024. Association for Computational Linguistics." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 145, + 505, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 145, + 505, + 178 + ], + "spans": [ + { + "bbox": [ + 106, + 145, + 505, + 178 + ], + "type": "text", + "content": "[15] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 185, + 505, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 185, + 505, + 240 + ], + "spans": [ + { + "bbox": [ + 106, + 185, + 505, + 240 + ], + "type": "text", + "content": "[16] Pei Ke, Bosi Wen, Zhuoer Feng, Xiao Liu, Xuanyu Lei, Jiale Cheng, Shengyuan Wang, Aohan Zeng, Yuxiao Dong, Hongning Wang, Jie Tang, and Minlie Huang. Critiquellm: Towards an informative critique generation model for evaluation of large language model generation. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 247, + 505, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 247, + 505, + 315 + ], + "spans": [ + { + "bbox": [ + 106, + 247, + 505, + 315 + ], + "type": "text", + "content": "[17] Seungone Kim, Juyoung Suk, Shayne Longpre, Bill Yuchen Lin, Jamin Shin, Sean Welleck, Graham Neubig, Moontae Lee, Kyungjae Lee, and Minjoon Seo. *Prometheus* 2: An open source language model specialized in evaluating other language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, *Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing*, pages 4334–4353, Miami, Florida, USA, November 2024. Association for Computational Linguistics." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 320, + 505, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 320, + 505, + 355 + ], + "spans": [ + { + "bbox": [ + 106, + 320, + 505, + 355 + ], + "type": "text", + "content": "[18] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, Kai Shu, Lu Cheng, and Huan Liu. From generation to judgment: Opportunities and challenges of llm-as-a-judge, 2025." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 361, + 505, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 361, + 505, + 395 + ], + "spans": [ + { + "bbox": [ + 106, + 361, + 505, + 395 + ], + "type": "text", + "content": "[19] Dawei Li, Renliang Sun, Yue Huang, Ming Zhong, Bohan Jiang, Jiawei Han, Xiangliang Zhang, Wei Wang, and Huan Liu. Preference leakage: A contamination problem in llm-as-a-judge, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 401, + 505, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 401, + 505, + 425 + ], + "spans": [ + { + "bbox": [ + 106, + 401, + 505, + 425 + ], + "type": "text", + "content": "[20] Haitao Li, Qian Dong, Junjie Chen, Huixue Su, Yujia Zhou, Qingyao Ai, Ziyi Ye, and Yiqun Liu. Llms-as-judges: A comprehensive survey on llm-based evaluation methods, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 430, + 505, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 430, + 505, + 464 + ], + "spans": [ + { + "bbox": [ + 106, + 430, + 505, + 464 + ], + "type": "text", + "content": "[21] Junlong Li, Shichao Sun, Weizhe Yuan, Run-Ze Fan, hai zhao, and Pengfei Liu. Generative judge for evaluating alignment. In The Twelfth International Conference on Learning Representations, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 471, + 505, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 471, + 505, + 506 + ], + "spans": [ + { + "bbox": [ + 106, + 471, + 505, + 506 + ], + "type": "text", + "content": "[22] Xun Liang, Shichao Song, Zifan Zheng, Hanyu Wang, Qingchen Yu, Xunkai Li, Rong-Hua Li, Yi Wang, Zhonghao Wang, Feiyu Xiong, and Zhiyu Li. Internal consistency and self-feedback in large language models: A survey, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 511, + 505, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 511, + 505, + 545 + ], + "spans": [ + { + "bbox": [ + 106, + 511, + 505, + 545 + ], + "type": "text", + "content": "[23] Junnan Liu, Hongwei Liu, Linchen Xiao, Ziyi Wang, Kuikun Liu, Songyang Gao, Wenwei Zhang, Songyang Zhang, and Kai Chen. Are your llms capable of stable reasoning? arXiv preprint arXiv:2412.13147, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 552, + 505, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 552, + 505, + 575 + ], + "spans": [ + { + "bbox": [ + 106, + 552, + 505, + 575 + ], + "type": "text", + "content": "[24] MAA. American invitational mathematics examination - aide. American Invitational Mathematics Examination - AIME 2024, February 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 581, + 505, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 581, + 505, + 605 + ], + "spans": [ + { + "bbox": [ + 106, + 581, + 505, + 605 + ], + "type": "text", + "content": "[25] OpenAI. GitHub - openai/evals: Evals is a framework for evaluating LLMs and LLM systems, and an open-source registry of benchmarks., 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 610, + 258, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 610, + 258, + 623 + ], + "spans": [ + { + "bbox": [ + 106, + 610, + 258, + 623 + ], + "type": "text", + "content": "[26] OpenAI. Openai o3-mini, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 629, + 505, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 629, + 505, + 653 + ], + "spans": [ + { + "bbox": [ + 106, + 629, + 505, + 653 + ], + "type": "text", + "content": "[27] OpenMMLab. Opencompass: A universal evaluation platform for foundation models. 
https://github.com/open-compass/opencompass, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 659, + 505, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 659, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 106, + 659, + 505, + 693 + ], + "type": "text", + "content": "[28] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 699, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 699, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 505, + 723 + ], + "type": "text", + "content": "[29] Zhihong Shao, Peiyi Wang, Qihao Zhu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models, 2024." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 515 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "[30] Gemma Team, Morgane Riviere, Shreya Pathak, et al. Gemma 2: Improving open language models at a practical size, 2024." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 102, + 474, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 102, + 474, + 115 + ], + "spans": [ + { + "bbox": [ + 106, + 102, + 474, + 115 + ], + "type": "text", + "content": "[31] Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 121, + 506, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 121, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 106, + 121, + 506, + 177 + ], + "type": "text", + "content": "[32] Xinpeng Wang, Bolei Ma, Chengzhi Hu, Leon Weber-Genzel, Paul Röttger, Frauke Kreuter, Dirk Hovy, and Barbara Plank. \"my answer is C\": First-token probabilities do not match text answers in instruction-tuned language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 7407–7416, Bangkok, Thailand, August 2024. Association for Computational Linguistics." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 183, + 504, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 183, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 106, + 183, + 504, + 217 + ], + "type": "text", + "content": "[33] Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 224, + 504, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 224, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 106, + 224, + 504, + 258 + ], + "type": "text", + "content": "[34] Yidong Wang, Zhuohao Yu, Zhengran Zeng, Linyi Yang, Cunxiang Wang, Hao Chen, Chaoya Jiang, Rui Xie, Jindong Wang, Xing Xie, Wei Ye, Shikun Zhang, and Yue Zhang. Pandalm: An automatic evaluation benchmark for llm instruction tuning optimization. 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 264, + 506, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 264, + 506, + 320 + ], + "spans": [ + { + "bbox": [ + 106, + 264, + 506, + 320 + ], + "type": "text", + "content": "[35] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian richter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 24824-24837. Curran Associates, Inc., 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 327, + 506, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 327, + 506, + 382 + ], + "spans": [ + { + "bbox": [ + 106, + 327, + 506, + 382 + ], + "type": "text", + "content": "[36] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian richter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 24824-24837. Curran Associates, Inc., 2022." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 389, + 504, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 389, + 504, + 412 + ], + "spans": [ + { + "bbox": [ + 106, + 389, + 504, + 412 + ], + "type": "text", + "content": "[37] An Yang, Baosong Yang, Beichen Zhang, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 419, + 504, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 419, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 106, + 419, + 504, + 475 + ], + "type": "text", + "content": "[38] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. Llamafactory: Unified efficient fine-tuning of " + }, + { + "bbox": [ + 106, + 419, + 504, + 475 + ], + "type": "inline_equation", + "content": "100+" + }, + { + "bbox": [ + 106, + 419, + 504, + 475 + ], + "type": "text", + "content": " language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), Bangkok, Thailand, 2024. Association for Computational Linguistics." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 482, + 506, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 482, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 106, + 482, + 506, + 515 + ], + "type": "text", + "content": "[39] Lianghui Zhu, Xinggang Wang, and Xinlong Wang. JudgeLM: Fine-tuned large language models are scalable judges. In The Thirteenth International Conference on Learning Representations, 2025." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 69, + 209, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 69, + 209, + 92 + ], + "spans": [ + { + "bbox": [ + 105, + 69, + 209, + 92 + ], + "type": "text", + "content": "Appendices" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 117, + 505, + 155 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 107, + 117, + 505, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 117, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 107, + 117, + 505, + 129 + ], + "type": "text", + "content": "A Datasets and Models 14" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 143, + 504, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 143, + 504, + 155 + ], + "spans": [ + { + "bbox": [ + 106, + 143, + 504, + 155 + ], + "type": "text", + "content": "B VAR Dataset Details 14" + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 120, + 159, + 504, + 205 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 120, + 159, + 504, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 159, + 504, + 172 + ], + "spans": [ + { + "bbox": [ + 120, + 159, + 504, + 172 + ], + "type": "text", + "content": "B.1 Details of Training, Test, and Generalization Sets 15" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 176, + 504, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { 
+ "bbox": [ + 121, + 176, + 504, + 188 + ], + "spans": [ + { + "bbox": [ + 121, + 176, + 504, + 188 + ], + "type": "text", + "content": "B.2 Details of Human Annotation 19" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 193, + 504, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 193, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 121, + 193, + 504, + 205 + ], + "type": "text", + "content": "B.3 Examples from the VAR Dataset 21" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 106, + 219, + 505, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 219, + 505, + 231 + ], + "spans": [ + { + "bbox": [ + 106, + 219, + 505, + 231 + ], + "type": "text", + "content": "C Model Training Details 22" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 120, + 236, + 504, + 264 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 120, + 236, + 504, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 236, + 504, + 248 + ], + "spans": [ + { + "bbox": [ + 120, + 236, + 504, + 248 + ], + "type": "text", + "content": "C.1 Training Hyperparameters 22" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 251, + 504, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 251, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 121, + 251, + 504, + 264 + ], + "type": "text", + "content": "C.2 Original Model Details 22" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 106, + 277, + 505, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 277, + 505, + 290 + ], + "spans": [ + { + "bbox": [ + 106, + 277, + 505, + 290 + ], + "type": "text", + "content": "D Prompts 22" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 120, + 294, + 504, + 373 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 
120, + 294, + 504, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 294, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 120, + 294, + 504, + 307 + ], + "type": "text", + "content": "D.1 Prompts for Generating LLM Responses 22" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 311, + 504, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 311, + 504, + 323 + ], + "spans": [ + { + "bbox": [ + 121, + 311, + 504, + 323 + ], + "type": "text", + "content": "D.2 Prompts for GPT-4o Annotation 23" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 121, + 327, + 504, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 327, + 504, + 339 + ], + "spans": [ + { + "bbox": [ + 121, + 327, + 504, + 339 + ], + "type": "text", + "content": "D.3 Prompts for Data Augmentation 23" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 121, + 344, + 504, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 344, + 504, + 356 + ], + "spans": [ + { + "bbox": [ + 121, + 344, + 504, + 356 + ], + "type": "text", + "content": "D.4 Prompts for Judge Model 23" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 121, + 360, + 504, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 360, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 121, + 360, + 504, + 373 + ], + "type": "text", + "content": "D.5 Prompts for xVerify 25" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 106, + 386, + 505, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 386, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 106, + 386, + 505, + 399 + ], + "type": "text", + "content": "E Supplementary Experimental Results 25" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 120, + 403, + 504, + 432 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 120, + 403, + 504, 
+ 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 403, + 504, + 415 + ], + "spans": [ + { + "bbox": [ + 120, + 403, + 504, + 415 + ], + "type": "text", + "content": "E.1 Evaluation Accuracy Results of All xVerify Models 25" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 121, + 419, + 504, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 419, + 504, + 432 + ], + "spans": [ + { + "bbox": [ + 121, + 419, + 504, + 432 + ], + "type": "text", + "content": "E.2 Computational Efficiency and Operational Cost of xVerify and Judge Models 26" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 71, + 234, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 71, + 234, + 84 + ], + "spans": [ + { + "bbox": [ + 107, + 71, + 234, + 84 + ], + "type": "text", + "content": "A Datasets and Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 104, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 104, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 107, + 104, + 504, + 128 + ], + "type": "text", + "content": "This section will present the relevant information for all the public datasets and LLMs involved in the experiments of this paper." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 132, + 505, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 132, + 505, + 221 + ], + "spans": [ + { + "bbox": [ + 107, + 132, + 505, + 221 + ], + "type": "text", + "content": "In this study, we employ a total of 24 datasets, which are categorized into four primary types: multiple-choice questions (Choice), short answer questions (Short Answer), mathematical problems (Math), and classification tasks (Classification), as summarized in Table 3. To evaluate the multilingual capabilities of the xVerify model, each question type includes datasets in both Chinese and English, with one dataset featuring multilingual content. For each dataset, samples are partitioned into training and test sets following a 2:1 ratio, with the training and test sets ideally comprising 2,000 and 1,000 instances, respectively. In certain cases, the number of available samples is below 3,000, or the official test set is not publicly available, resulting in reduced dataset sizes after preprocessing." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 107, + 284, + 504, + 575 + ], + "blocks": [ + { + "bbox": [ + 107, + 243, + 504, + 278 + ], + "lines": [ + { + "bbox": [ + 107, + 243, + 504, + 278 + ], + "spans": [ + { + "bbox": [ + 107, + 243, + 504, + 278 + ], + "type": "text", + "content": "Table 3: Datasets Description. The \"Type\" column indicates the question type in the corresponding dataset, including multiple-choice questions (Choice), short answer questions (Short Answer), math questions (Math), and classification questions (Classification)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 284, + 504, + 575 + ], + "lines": [ + { + "bbox": [ + 107, + 284, + 504, + 575 + ], + "spans": [ + { + "bbox": [ + 107, + 284, + 504, + 575 + ], + "type": "table", + "html": "
DatasetType#Train#TestLanguageLicense
CMMLUChoice20001000ChineseCC-BY-NC-4.0
C-EvalChoice1346260ChineseCC-BY-NC-SA-4.0
GPQAChoice794398EnglishCC-BY-4.0
MMLUChoice18161000EnglishMIT
MMLU-ProChoice20001000EnglishMIT
MMLU-RutexChoice20001000EnglishCC-BY-4.0
AgNewsClassification20001000EnglishUnspecified
AmazonClassification20001000EnglishApache-2.0
CLUEWSCClassification15481000ChineseUnspecified
CMNLIClassification20001000ChineseApache-2.0
AMC23Math2614EnglishUnspecified
AIME 2024Math2010EnglishMIT
CMATHMath1128565ChineseCC-BY-4.0
GSM8KMath20001000EnglishMIT
LiveMathBenchMath19093English & ChineseCC-BY-4.0
MATHMath20001000EnglishMIT
MGSMMath1892946MultilingualCC-BY-SA-4.0
OlympiadBenchMath1787892English & ChineseApache-2.0
ARCShort Answer20001000EnglishCC-BY-SA-4.0
CHIDShort Answer20001000ChineseApache-2.0
C-SimpleQAShort Answer20001000ChineseCC-BY-NC-SA-4.0
DROPShort Answer20001000EnglishCC-BY-SA-4.0
FRAMESShort Answer550274EnglishApache-2.0
SimpleQAShort Answer20001000EnglishMIT
", + "image_path": "ace9afe870fb516fba4a36b235780757dc31427ac11e60fe7a7dd45ba0ad352f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 600, + 504, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 600, + 504, + 633 + ], + "spans": [ + { + "bbox": [ + 107, + 600, + 504, + 633 + ], + "type": "text", + "content": "A total of 19 large language models (LLMs) are utilized in our experiments, encompassing a diverse range of model sizes and types, with a particular emphasis on reasoning models (see Table 4). These models are subsequently used to collect LLM-generated responses and to train the xVerify model." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 664, + 231, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 664, + 231, + 677 + ], + "spans": [ + { + "bbox": [ + 107, + 664, + 231, + 677 + ], + "type": "text", + "content": "B VAR Dataset Details" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 699, + 504, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 699, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 107, + 699, + 504, + 722 + ], + "type": "text", + "content": "This section will present detailed information about the components of the VAR dataset, the details of human annotations, and examples from the dataset." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 146, + 98, + 464, + 330 + ], + "blocks": [ + { + "bbox": [ + 105, + 69, + 506, + 92 + ], + "lines": [ + { + "bbox": [ + 105, + 69, + 506, + 92 + ], + "spans": [ + { + "bbox": [ + 105, + 69, + 506, + 92 + ], + "type": "text", + "content": "Table 4: LLMs Description. LLMs are listed by release date. All models are chat or instruct type. \"NaN\" indicates that public data is unavailable." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 146, + 98, + 464, + 330 + ], + "lines": [ + { + "bbox": [ + 146, + 98, + 464, + 330 + ], + "spans": [ + { + "bbox": [ + 146, + 98, + 464, + 330 + ], + "type": "table", + "html": "
Model#Para.TypePublisherDate
ChatGLM3-6B6BChatTsinghua2023.10
GPT-4oNaNChatOpenAI2024.05
Gemma-2-2B-it2BInstructGoogle2024.06
Gemma-2-9B-it9BInstructGoogle2024.06
GLM-4-9B-Chat9BChatTsinghua2024.06
InternLM2.5-7B-Chat7BChatShLab2024.06
Qwen2-1.5B-Instruct1.5BInstructAlibaba2024.06
Qwen2-7B-Instruct7BInstructAlibaba2024.06
Llama-3.1-8B-Instruct8BInstructMeta2024.07
Llama-3.2-1B-Instruct1BInstructMeta2024.09
Llama-3.2-3B-Instruct3BInstructMeta2024.09
Qwen2.5-7B-Instruct7BInstructAlibaba2024.09
Qwen2.5-14B-Instruct14BInstructAlibaba2024.09
Phi-414BChatMicrosoft2024.11
DeepSeek-R1-Distill-Llama-8B8BDistillDeepSeek2025.01
DeepSeek-R1-Distill-Qwen-1.5B1.5BDistillDeepSeek2025.01
DeepSeek-R1-Distill-Qwen-7B7BDistillDeepSeek2025.01
DeepSeek-R1-Distill-Qwen-14B14BDistillDeepSeek2025.01
QwQ-32B32BInstructAlibaba2025.03
", + "image_path": "ab208174476212e4137c9a301bd0ee711411191248932f47f50bc7023c55d50b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 352, + 340, + 364 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 352, + 340, + 364 + ], + "spans": [ + { + "bbox": [ + 105, + 352, + 340, + 364 + ], + "type": "text", + "content": "B.1 Details of Training, Test, and Generalization Sets" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 373, + 194, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 373, + 194, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 194, + 385 + ], + "type": "text", + "content": "B.1.1 Training Set" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 393, + 504, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 393, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 393, + 504, + 427 + ], + "type": "text", + "content": "The training set comprises 43,204 samples. Tables 5 to 8 provide the sample counts corresponding to each LLM, dataset, prompt template, and question type. Note that datasets with names containing \"_enh\" refer to the augmented multiple choice question datasets." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 198, + 456, + 411, + 645 + ], + "blocks": [ + { + "bbox": [ + 176, + 439, + 432, + 452 + ], + "lines": [ + { + "bbox": [ + 176, + 439, + 432, + 452 + ], + "spans": [ + { + "bbox": [ + 176, + 439, + 432, + 452 + ], + "type": "text", + "content": "Table 5: Number of samples from each LLM in the training set." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 198, + 456, + 411, + 645 + ], + "lines": [ + { + "bbox": [ + 198, + 456, + 411, + 645 + ], + "spans": [ + { + "bbox": [ + 198, + 456, + 411, + 645 + ], + "type": "table", + "html": "
ModelSample Counts
ChatGLM3-6B2588
GPT-4o2691
Gemma-2-2B-it2657
Gemma-2-9B-it2600
GLM-4-9B-Chat2957
InternLM2.5-7B-Chat2935
Qwen2-1.5B-Instruct2700
Qwen2-7B-Instruct2898
LLaMA-3.1-8B-Instruct2852
Qwen2.5-7B-Instruct2854
Qwen2.5-14B-Instruct2801
DeepSeek-R1-Distill-Llama-8B3223
DeepSeek-R1-Distill-Qwen-1.5B3231
DeepSeek-R1-Distill-Qwen-7B3075
DeepSeek-R1-Distill-Qwen-14B3142
", + "image_path": "fd11d917852a50665391d27d111940fb6ecebf798a934d5a7eff7d7d0412bf43.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 669, + 174, + 680 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 669, + 174, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 669, + 174, + 680 + ], + "type": "text", + "content": "B.1.2 Test Set" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "The test set comprises 6,122 samples. Tables 9 to 12 provide the sample counts corresponding to each LLM, dataset, prompt template, and question type. Note that datasets with names containing \"_enh\" refer to the augmented multiple choice question datasets." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 225, + 110, + 386, + 376 + ], + "blocks": [ + { + "bbox": [ + 173, + 93, + 436, + 105 + ], + "lines": [ + { + "bbox": [ + 173, + 93, + 436, + 105 + ], + "spans": [ + { + "bbox": [ + 173, + 93, + 436, + 105 + ], + "type": "text", + "content": "Table 6: Number of samples from each dataset in the training set." 
+ } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 225, + 110, + 386, + 376 + ], + "lines": [ + { + "bbox": [ + 225, + 110, + 386, + 376 + ], + "spans": [ + { + "bbox": [ + 225, + 110, + 386, + 376 + ], + "type": "table", + "html": "
DatasetSample Counts
CMMLU1557
CMMLU_enh1641
GPQA1587
GPQA_enh1668
MMLU1520
MMLU_enh1513
MMLU-Pro1394
MMLU-Pro_enh1442
AgNews1751
CLUEWSC5008
AMC231625
AIME 20241333
CMATH1893
GSM8K1836
MATH2485
MGSM1384
OlympiadBench_en2573
OlympiadBench_zh2709
CHID2424
C-SimpleQA1913
DROP1928
FRAMES2020
", + "image_path": "73a9453381a215760714368ac5cb85025f3e7bd128dfccae5838a59ffcaadddf.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 227, + 445, + 382, + 559 + ], + "blocks": [ + { + "bbox": [ + 154, + 428, + 455, + 441 + ], + "lines": [ + { + "bbox": [ + 154, + 428, + 455, + 441 + ], + "spans": [ + { + "bbox": [ + 154, + 428, + 455, + 441 + ], + "type": "text", + "content": "Table 7: Number of samples from each prompt template in the training set." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 227, + 445, + 382, + 559 + ], + "lines": [ + { + "bbox": [ + 227, + 445, + 382, + 559 + ], + "spans": [ + { + "bbox": [ + 227, + 445, + 382, + 559 + ], + "type": "table", + "html": "
Prompt TemplateSample Counts
0-shot4884
0-shot-restrict5977
0-shot-cot4907
0-shot-cot-restrict6041
5-shot4774
5-shot-restrict5866
5-shot-cot4916
5-shot-cot-restrict5839
", + "image_path": "c2fad44e3447682f07ed7b3b720c84c552b7a4e0bd7f0a0ad060aca491fe686e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 233, + 628, + 377, + 696 + ], + "blocks": [ + { + "bbox": [ + 160, + 610, + 449, + 623 + ], + "lines": [ + { + "bbox": [ + 160, + 610, + 449, + 623 + ], + "spans": [ + { + "bbox": [ + 160, + 610, + 449, + 623 + ], + "type": "text", + "content": "Table 8: Number of samples from each question type in the training set." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 233, + 628, + 377, + 696 + ], + "lines": [ + { + "bbox": [ + 233, + 628, + 377, + 696 + ], + "spans": [ + { + "bbox": [ + 233, + 628, + 377, + 696 + ], + "type": "table", + "html": "
DatasetSample Counts
Multiple Choice12322
Math15838
Short Answer8285
Classification6759
", + "image_path": "7f8043c5633395c2adfc55d0b319e11b7f5bbfb48b2a9d7af5961c8ed362baa1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 198, + 99, + 413, + 286 + ], + "blocks": [ + { + "bbox": [ + 185, + 80, + 425, + 92 + ], + "lines": [ + { + "bbox": [ + 185, + 80, + 425, + 92 + ], + "spans": [ + { + "bbox": [ + 185, + 80, + 425, + 92 + ], + "type": "text", + "content": "Table 9: Number of samples from each LLM in the test set." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 198, + 99, + 413, + 286 + ], + "lines": [ + { + "bbox": [ + 198, + 99, + 413, + 286 + ], + "spans": [ + { + "bbox": [ + 198, + 99, + 413, + 286 + ], + "type": "table", + "html": "
ModelSample Counts
ChatGLM3-6B378
GPT-4o400
Gemma-2-2B-it416
Gemma-2-9B-it369
GLM-4-9B-Chat367
InternLM2.5-7B-Chat367
Qwen2-1.5B-Instruct433
Qwen2-7B-Instruct427
LLaMA-3.1-8B-Instruct404
Qwen2.5-7B-Instruct374
Qwen2.5-14B-Instruct415
DeepSeek-R1-Distill-Llama-8B430
DeepSeek-R1-Distill-Qwen-1.5B451
DeepSeek-R1-Distill-Qwen-7B439
DeepSeek-R1-Distill-Qwen-14B452
", + "image_path": "3dac7cbc26dfe8f1ceb6cbaed76906c9a6301581c993f71b06d82a52f269f72d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 225, + 331, + 386, + 596 + ], + "blocks": [ + { + "bbox": [ + 179, + 314, + 430, + 326 + ], + "lines": [ + { + "bbox": [ + 179, + 314, + 430, + 326 + ], + "spans": [ + { + "bbox": [ + 179, + 314, + 430, + 326 + ], + "type": "text", + "content": "Table 10: Number of samples from each dataset in the test set." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 225, + 331, + 386, + 596 + ], + "lines": [ + { + "bbox": [ + 225, + 331, + 386, + 596 + ], + "spans": [ + { + "bbox": [ + 225, + 331, + 386, + 596 + ], + "type": "table", + "html": "
DatasetSample Counts
CMMLU216
CMMLU_enh195
GPQA207
GPQA_enh235
MMLU225
MMLU_enh222
MMLU-Pro171
MMLU-Pro_enh192
AgNews261
CLUEWSC710
AMC23258
AIME 2024186
CMATH263
GSM8K262
MATH362
MGSM205
OlympiadBench_en349
OlympiadBench_zh446
CHID347
C-SimpleQA270
DROP265
FRAMES275
", + "image_path": "0629df9d0b19bee4203560dc4a695c83d0ddc6503641996064d63f483195b57e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 233, + 641, + 378, + 709 + ], + "blocks": [ + { + "bbox": [ + 160, + 623, + 449, + 635 + ], + "lines": [ + { + "bbox": [ + 160, + 623, + 449, + 635 + ], + "spans": [ + { + "bbox": [ + 160, + 623, + 449, + 635 + ], + "type": "text", + "content": "Table 11: Number of samples from each prompt template in the test set." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 233, + 641, + 378, + 709 + ], + "lines": [ + { + "bbox": [ + 233, + 641, + 378, + 709 + ], + "spans": [ + { + "bbox": [ + 233, + 641, + 378, + 709 + ], + "type": "table", + "html": "
DatasetSample Counts
Multiple Choice1663
Math2331
Short Answer1157
Classification971
", + "image_path": "c517511348593103d11d7b4b5735a2b47eb6ee73d4973baed5ac68d9f8ac00a3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 227, + 87, + 382, + 200 + ], + "blocks": [ + { + "bbox": [ + 166, + 69, + 443, + 82 + ], + "lines": [ + { + "bbox": [ + 166, + 69, + 443, + 82 + ], + "spans": [ + { + "bbox": [ + 166, + 69, + 443, + 82 + ], + "type": "text", + "content": "Table 12: Number of samples from each question type in the test set." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 227, + 87, + 382, + 200 + ], + "lines": [ + { + "bbox": [ + 227, + 87, + 382, + 200 + ], + "spans": [ + { + "bbox": [ + 227, + 87, + 382, + 200 + ], + "type": "table", + "html": "
Prompt TemplateSample Counts
0-shot680
0-shot-restrict798
0-shot-cot642
0-shot-cot-restrict891
5-shot690
5-shot-restrict789
5-shot-cot702
5-shot-cot-restrict930
", + "image_path": "b1acc2ec410f8786487d62af1a5b00c560cbb70f19fd5ef0c2cc2a8b235279f6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 221, + 219, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 221, + 219, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 221, + 219, + 232 + ], + "type": "text", + "content": "B.1.3 Generalization Set" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 242, + 506, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 242, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 242, + 506, + 277 + ], + "type": "text", + "content": "The generalization set comprises 6,468 samples. Tables 13 to 16 provide the sample counts corresponding to each LLM, dataset, prompt template, and question type. Note that datasets with names containing \"_enh\" refer to the augmented multiple choice question datasets." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 198, + 305, + 413, + 539 + ], + "blocks": [ + { + "bbox": [ + 160, + 288, + 448, + 300 + ], + "lines": [ + { + "bbox": [ + 160, + 288, + 448, + 300 + ], + "spans": [ + { + "bbox": [ + 160, + 288, + 448, + 300 + ], + "type": "text", + "content": "Table 13: Number of samples from each LLM in the generalization set." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 198, + 305, + 413, + 539 + ], + "lines": [ + { + "bbox": [ + 198, + 305, + 413, + 539 + ], + "spans": [ + { + "bbox": [ + 198, + 305, + 413, + 539 + ], + "type": "table", + "html": "
ModelSample Counts
ChatGLM3-6B300
GPT-4o305
Gemma-2-2B-it427
Gemma-2-9B-it296
GLM-4-9B-Chat339
InternLM2.5-7B-Chat341
Qwen2-1.5B-Instruct280
Qwen2-7B-Instruct346
LLaMA-3.1-8B-Instruct400
LLaMA-3.2-1B-Instruct314
LLaMA-3.2-3B-Instruct310
Qwen2.5-7B-Instruct326
Qwen2.5-14B-Instruct334
Phi-4314
DeepSeek-R1-Distill-Llama-8B341
DeepSeek-R1-Distill-Qwen-1.5B399
DeepSeek-R1-Distill-Qwen-7B375
DeepSeek-R1-Distill-Qwen-14B434
QwQ-32B287
", + "image_path": "f5727c2e469bab12fce1b06ec5d22b6afe087d69e01d8fc5aaf17bfaa9fdf2fe.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 225, + 576, + 386, + 711 + ], + "blocks": [ + { + "bbox": [ + 157, + 559, + 453, + 571 + ], + "lines": [ + { + "bbox": [ + 157, + 559, + 453, + 571 + ], + "spans": [ + { + "bbox": [ + 157, + 559, + 453, + 571 + ], + "type": "text", + "content": "Table 14: Number of samples from each dataset in the generalization set." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 225, + 576, + 386, + 711 + ], + "lines": [ + { + "bbox": [ + 225, + 576, + 386, + 711 + ], + "spans": [ + { + "bbox": [ + 225, + 576, + 386, + 711 + ], + "type": "table", + "html": "
DatasetSample Counts
C-Eval435
C-Eval_enh442
MMLU-Redux436
MMLU-Redux_enh483
Amazon646
CMNLI643
LiveMathBench_en1127
LiveMathBench_zh821
ARC807
SimpleQA628
", + "image_path": "e9b4dd3cde946e173686828b3110179c284836bb8a3a8b4e6ad063eb9ec3c465.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 232, + 87, + 378, + 156 + ], + "blocks": [ + { + "bbox": [ + 138, + 69, + 472, + 82 + ], + "lines": [ + { + "bbox": [ + 138, + 69, + 472, + 82 + ], + "spans": [ + { + "bbox": [ + 138, + 69, + 472, + 82 + ], + "type": "text", + "content": "Table 15: Number of samples from each prompt template in the generalization set." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 232, + 87, + 378, + 156 + ], + "lines": [ + { + "bbox": [ + 232, + 87, + 378, + 156 + ], + "spans": [ + { + "bbox": [ + 232, + 87, + 378, + 156 + ], + "type": "table", + "html": "
DatasetSample Counts
Multiple Choice1796
Math1948
Short Answer1435
Classification1289
", + "image_path": "0a73b1232815b5f11a0943458d3e6c251b60e51f233064cae9b36a3e559a35f7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 228, + 182, + 382, + 294 + ], + "blocks": [ + { + "bbox": [ + 144, + 164, + 465, + 176 + ], + "lines": [ + { + "bbox": [ + 144, + 164, + 465, + 176 + ], + "spans": [ + { + "bbox": [ + 144, + 164, + 465, + 176 + ], + "type": "text", + "content": "Table 16: Number of samples from each question type in the generalization set." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 228, + 182, + 382, + 294 + ], + "lines": [ + { + "bbox": [ + 228, + 182, + 382, + 294 + ], + "spans": [ + { + "bbox": [ + 228, + 182, + 382, + 294 + ], + "type": "table", + "html": "
Prompt TemplateSample Counts
0-shot703
0-shot-restrict856
0-shot-cot772
0-shot-cot-restrict915
5-shot690
5-shot-restrict885
5-shot-cot756
5-shot-cot-restrict891
", + "image_path": "f0f39aa49410fc0e741f63585bbd3a48e8a9c8fdfc5d987e2b79d4ffef1da1bd.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 314, + 258, + 325 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 314, + 258, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 314, + 258, + 325 + ], + "type": "text", + "content": "B.2 Details of Human Annotation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 334, + 506, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 334, + 506, + 401 + ], + "spans": [ + { + "bbox": [ + 104, + 334, + 506, + 401 + ], + "type": "text", + "content": "To ensure high-quality annotation for the VAR dataset, we assembled a team of 8 annotators. Among them, 6 hold bachelor's degrees and are primarily responsible for batch annotation tasks, while the other 2 hold master's degrees and focus on reviewing complex cases or resolving discrepancies in annotations made by multiple annotators. The gender ratio within the annotation team is balanced at 1:1. In terms of compensation, all annotators were paid according to the local industry average rates. The annotation process lasted for three weeks, covering a total of 15 working days." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 106, + 410, + 504, + 616 + ], + "blocks": [ + { + "bbox": [ + 106, + 410, + 504, + 616 + ], + "lines": [ + { + "bbox": [ + 106, + 410, + 504, + 616 + ], + "spans": [ + { + "bbox": [ + 106, + 410, + 504, + 616 + ], + "type": "image", + "image_path": "b0c4cb4845ea9f03c0724fd183e74ebd191cb72e12c445bf83a4897206519880.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 201, + 622, + 408, + 635 + ], + "lines": [ + { + "bbox": [ + 201, + 622, + 408, + 635 + ], + "spans": [ + { + "bbox": [ + 201, + 622, + 408, + 635 + ], + "type": "text", + "content": "Figure 3: Illustration of the Label Studio Interface." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "content": "The detailed annotation guidelines are presented below. Figure 3 shows an example of the interface used in our annotation tool. Each sample to be annotated contains four fields: question, LLM output, correct answer, and answer range. The question type includes four categories: multiple choice, math, short answer, and classification. Annotators are required to judge whether the LLM output matches the correct answer based on the question, while the answer range serves as auxiliary reference information to support the decision-making process. 
The specific annotation instructions and criteria are as follows:" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 342, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 342, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 342, + 85 + ], + "type": "text", + "content": "Answer evaluation criteria for different question types:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 132, + 94, + 212, + 104 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 94, + 212, + 104 + ], + "spans": [ + { + "bbox": [ + 132, + 94, + 212, + 104 + ], + "type": "text", + "content": "- Multiple Choice" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 138, + 105, + 506, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 105, + 506, + 138 + ], + "spans": [ + { + "bbox": [ + 138, + 105, + 506, + 138 + ], + "type": "text", + "content": "For multiple-choice questions, answer options may be labeled with letters (A, B, C, D, ...) Roman numerals (I, II, III, IV, ...), or Arabic numerals (1, 2, 3, 4, ...). 
The LLM output is considered correct if it provides:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 150, + 141, + 301, + 178 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 150, + 141, + 281, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 141, + 281, + 152 + ], + "spans": [ + { + "bbox": [ + 150, + 141, + 281, + 152 + ], + "type": "text", + "content": "- Only the correct option label;" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 150, + 155, + 291, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 155, + 291, + 166 + ], + "spans": [ + { + "bbox": [ + 150, + 155, + 291, + 166 + ], + "type": "text", + "content": "- Only the correct option content;" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 150, + 167, + 301, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 167, + 301, + 178 + ], + "spans": [ + { + "bbox": [ + 150, + 167, + 301, + 178 + ], + "type": "text", + "content": "- Both the correct label and content." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 139, + 182, + 504, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 182, + 504, + 226 + ], + "spans": [ + { + "bbox": [ + 139, + 182, + 504, + 226 + ], + "type": "text", + "content": "In cases where the label and content are inconsistent, the content takes precedence. If the content is correct, the answer is marked as correct; if the content is incorrect, the answer is marked as incorrect, even if the option label is correct (see the final annotation example for reference)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 230, + 203, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 230, + 203, + 239 + ], + "spans": [ + { + "bbox": [ + 132, + 230, + 203, + 239 + ], + "type": "text", + "content": "- Short Answer" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 139, + 241, + 504, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 241, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 139, + 241, + 504, + 262 + ], + "type": "text", + "content": "Short-answer questions may require responses such as names, locations, numbers, dates, or full sentences. The evaluation criteria are:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 149, + 266, + 505, + 315 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 149, + 266, + 490, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 266, + 490, + 278 + ], + "spans": [ + { + "bbox": [ + 149, + 266, + 490, + 278 + ], + "type": "text", + "content": "- For concise answers (e.g., names, places, dates), strict string matching is required." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 149, + 279, + 505, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 279, + 505, + 291 + ], + "spans": [ + { + "bbox": [ + 149, + 279, + 505, + 291 + ], + "type": "text", + "content": "- For sentence-level answers, semantic consistency with the reference answer is required." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 149, + 292, + 504, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 292, + 504, + 315 + ], + "spans": [ + { + "bbox": [ + 149, + 292, + 504, + 315 + ], + "type": "text", + "content": "- For numerical answers, mathematical equivalence must be verified (e.g., \"12000\" and \"12,000\" are considered equivalent)." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 318, + 201, + 328 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 318, + 201, + 328 + ], + "spans": [ + { + "bbox": [ + 132, + 318, + 201, + 328 + ], + "type": "text", + "content": "- Classification" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 139, + 329, + 504, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 329, + 504, + 352 + ], + "spans": [ + { + "bbox": [ + 139, + 329, + 504, + 352 + ], + "type": "text", + "content": "Classification questions come with a fixed set of candidate answers. The LLM output must explicitly and exactly match the correct answer in this set to be judged as correct." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 355, + 167, + 364 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 355, + 167, + 364 + ], + "spans": [ + { + "bbox": [ + 132, + 355, + 167, + 364 + ], + "type": "text", + "content": "Math" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 139, + 365, + 504, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 365, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 139, + 365, + 504, + 388 + ], + "type": "text", + "content": "For mathematical questions, the final answer in the LLM output must be mathematically equivalent to the reference answer. Evaluation criteria include:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 149, + 392, + 504, + 437 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 149, + 392, + 504, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 392, + 504, + 413 + ], + "spans": [ + { + "bbox": [ + 149, + 392, + 504, + 413 + ], + "type": "text", + "content": "- If an initial answer (ans1) is given but followed by a derived final answer (ans2) through calculation, ans2 should be used for evaluation." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 149, + 415, + 504, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 415, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 149, + 415, + 504, + 437 + ], + "type": "text", + "content": "- If the LLM output or ground-truth answer is provided in LaTeX format and cannot be visually interpreted, a LaTeX compiler should be used to determine equivalence." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 448, + 166, + 459 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 448, + 166, + 459 + ], + "spans": [ + { + "bbox": [ + 105, + 448, + 166, + 459 + ], + "type": "text", + "content": "Special cases:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 132, + 469, + 244, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 469, + 244, + 480 + ], + "spans": [ + { + "bbox": [ + 132, + 469, + 244, + 480 + ], + "type": "text", + "content": "Overly Long Responses" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 139, + 480, + 506, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 480, + 506, + 513 + ], + "spans": [ + { + "bbox": [ + 139, + 480, + 506, + 513 + ], + "type": "text", + "content": "If the LLM output is excessively long, use the final answer provided as the basis for judgment. If the response does not converge to a clear answer (e.g., repeated changes or ambiguity), it should be marked as incorrect." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 132, + 517, + 244, + 527 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 517, + 244, + 527 + ], + "spans": [ + { + "bbox": [ + 132, + 517, + 244, + 527 + ], + "type": "text", + "content": "- Truncated Calculations" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 139, + 528, + 505, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 528, + 505, + 561 + ], + "spans": [ + { + "bbox": [ + 139, + 528, + 505, + 561 + ], + "type": "text", + "content": "In long responses where the final verification or calculation is truncated, it can be ignored. If a clear answer was provided earlier, use it for evaluation; if not, mark the response as incorrect (see the second-to-last annotation example)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 132, + 564, + 231, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 564, + 231, + 574 + ], + "spans": [ + { + "bbox": [ + 132, + 564, + 231, + 574 + ], + "type": "text", + "content": "Unfamiliar Domains" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 139, + 575, + 504, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 575, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 139, + 575, + 504, + 609 + ], + "type": "text", + "content": "If the correctness of the LLM response cannot be determined due to unfamiliar domain knowledge (e.g., mathematical expressions whose equivalence is hard to judge), the case may be skipped and will later be annotated by more qualified annotators." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 132, + 612, + 283, + 622 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 612, + 283, + 622 + ], + "spans": [ + { + "bbox": [ + 132, + 612, + 283, + 622 + ], + "type": "text", + "content": "- Irrelevant but Matched Answers" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 139, + 623, + 504, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 623, + 504, + 645 + ], + "spans": [ + { + "bbox": [ + 139, + 623, + 504, + 645 + ], + "type": "text", + "content": "If the LLM output is irrelevant to the question but coincidentally contains the correct final answer, it should be marked as incorrect." + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 271, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 271, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 271, + 84 + ], + "type": "text", + "content": "B.3 Examples from the VAR Dataset" + } + ] + } + ], + "index": 0 + }, + { + "type": "code", + "bbox": [ + 104, + 94, + 509, + 650 + ], + "blocks": [ + { + "bbox": [ + 104, + 94, + 509, + 650 + ], + "lines": [ + { + "bbox": [ + 104, + 94, + 509, + 650 + ], + "spans": [ + { + "bbox": [ + 104, + 94, + 509, + 650 + ], + "type": "text", + "content": "{ \"dataset\": \"SimpleQA\", \"question\": \"In which year did Fayaz A. 
Malik (an Indian pharmacologist, cancer biologist, and scientist) receive the Young Scientist of the Year from the Council of Scientific and Industrial Research?\", \"question_type\": \"short answer\", \"correct_answer\": \"2009\", \"llm_output\": \"The year Fayaz A. Malik received the Young Scientist of the Year award from the Council of Scientific and Industrial Research was 2001\\. \\textbackslash textbackslash n\\textbackslash textbackslash nThe answer is 2001.\", \"humanjudge_result\": \"Incorrect\" } { \"dataset\": \"MMLU-Redux_enh\", \"question\": \"If you wanted to find the global distribution of coal , you would use a Answer Choices: (I) reference map. (II) topographic map. (III) thematic map. (IV) location map.\", \"question_type\": \"multiple choice\", \"correct_answer\": \"III\", \"llm_output\": \"The answer is (III) thematic map.\", \"humanjudge_result\": \"Correct\" } { \"dataset\": \"Amazon\", \"question\": \"Lightsaber Action!!: This game is absolutely the best game I have ever seen! there is a little bit of violence, but most of the time you kill droids, not humans. My favorite weapon is the lightsaber, because you can deflect laser blasts\\. \\textbackslash textbackslash n Please identify the sentiment polarity of the sentence: positive or negative\", \"question_type\": \"classification\", \"correct_answer\": \"positive\", \"llm_output\": \"The answer is positive.\", \"humanjudge_result\": \"Correct\" } { \"dataset\": \"GSM8K\", \"question\": \"Daisy and Rose were enjoying their backyard pool with their dogs. 
If there are 24 legs/paws in the pool, how many dogs do Daisy and Rose have?\", \"question_type\": \"math\", \"correct_answer\": \"5\", \"llm_output\": \"To find the number of dogs Daisy and Rose have, we need to first determine the total number of people and then subtract that from the total number of legs/paws\\.n\\sSince each person has 2 legs, we can divide the total number of legs/paws by 2 to find the number of people\\.n24 legs/paws / 2 legs/person = 12 people\\n\\sSince Daisy and Rose are the only people in the pool, we can subtract them from the total number of people to find the number of dogs\\.n12 people - 2 people = 10 dogs\\n\\nTherefore, Daisy and Rose have 10 dogs\\.\". \"humanjudge_result\": \"Incorrect\" }" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "jsonl" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 248, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 248, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 248, + 85 + ], + "type": "text", + "content": "C Model Training Details" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 95, + 487, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 95, + 487, + 109 + ], + "spans": [ + { + "bbox": [ + 104, + 95, + 487, + 109 + ], + "type": "text", + "content": "This section will further present additional information about the training of the xVerify model." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 120, + 248, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 120, + 248, + 133 + ], + "spans": [ + { + "bbox": [ + 105, + 120, + 248, + 133 + ], + "type": "text", + "content": "C.1 Training Hyperparameters" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 140, + 504, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 140, + 504, + 174 + ], + "spans": [ + { + "bbox": [ + 104, + 140, + 504, + 174 + ], + "type": "text", + "content": "The xVerify model is trained using the QLoRA method, with consistent hyperparameter settings across all base models. The training is carried out on multiple GPU servers. Table 17 presents the key training hyperparameters." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 195, + 200, + 401, + 324 + ], + "blocks": [ + { + "bbox": [ + 195, + 200, + 401, + 324 + ], + "lines": [ + { + "bbox": [ + 195, + 200, + 401, + 324 + ], + "spans": [ + { + "bbox": [ + 195, + 200, + 401, + 324 + ], + "type": "table", + "html": "
HyperparameterSetting
Per Device Train Batch Size1
Gradient Accumulation Steps8
Learning Rate1.0e-4
Num Train Epochs1.0
LrScheduler Typecosine
Warmup Ratio0.1
Bf16true
Ddp Timeout180000000
Lora Rank8
", + "image_path": "896e2b96f77717b8e0c38a400cdd6dd74ebd302e370ea750aa771547dddedf3e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 195, + 182, + 414, + 195 + ], + "lines": [ + { + "bbox": [ + 195, + 182, + 414, + 195 + ], + "spans": [ + { + "bbox": [ + 195, + 182, + 414, + 195 + ], + "type": "text", + "content": "Table 17: Hyperparameter settings for model training." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 342, + 230, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 230, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 230, + 354 + ], + "type": "text", + "content": "C.2 Original Model Details" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 362, + 504, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 362, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 362, + 504, + 396 + ], + "type": "text", + "content": "This paper uses 14 original models of different parameter scales and types for training on the VAR dataset. Table 18 presents the relevant information for all xVerify models and their corresponding original models." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 139, + 432, + 471, + 609 + ], + "blocks": [ + { + "bbox": [ + 104, + 404, + 504, + 427 + ], + "lines": [ + { + "bbox": [ + 104, + 404, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 404, + 504, + 427 + ], + "type": "text", + "content": "Table 18: Details of Original Models and Corresponding xVerify Models. Sorted by Original Model Name." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 139, + 432, + 471, + 609 + ], + "lines": [ + { + "bbox": [ + 139, + 432, + 471, + 609 + ], + "spans": [ + { + "bbox": [ + 139, + 432, + 471, + 609 + ], + "type": "table", + "html": "
Original Model#Para.TypeContext LengthxVerify Model
Gemma-2-2B-it2BInstruct8KxVerify-2B-I
Gemma-2-9B-it9BInstruct8KxVerify-9B-I
Gemma-2-27B-it27BInstruct8KxVerify-27B-I
GLM-4-9B-Chat9BChat128KxVerify-9B-C
Llama-3.2-1B-Instruct1BInstruct128KxVerify-1B-I
Llama-3.2-3B-Instruct3BInstruct128KxVerify-3B-Ia
Llama-3.1-8B-Instruct8BInstruct128KxVerify-8B-I
Phi-414BInstruct16kxVerify-14B-Ib
Qwen2.5-0.5B-Instruct0.5BInstruct128KxVerify-0.5B-I
Qwen2.5-1.5B-Instruct1.5BInstruct128KxVerify-1.5B-I
Qwen2.5-3B-Instruct3BInstruct128KxVerify-3B-Ib
Qwen2.5-7B-Instruct7BInstruct128KxVerify-7B-I
Qwen2.5-14B-Instruct14BInstruct128KxVerify-14B-Ia
Qwen2.5-32B-Instruct32BInstruct128KxVerify-32B-I
", + "image_path": "66b6457509766ada8a1bb0ea03cd9f4f8d43c958b3a107ea849da0475400826c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 630, + 173, + 644 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 630, + 173, + 644 + ], + "spans": [ + { + "bbox": [ + 105, + 630, + 173, + 644 + ], + "type": "text", + "content": "D Prompts" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 654, + 455, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 455, + 668 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 455, + 668 + ], + "type": "text", + "content": "This section will present all the prompt templates used in the experiments of this paper." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 679, + 307, + 692 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 679, + 307, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 307, + 692 + ], + "type": "text", + "content": "D.1 Prompts for Generating LLM Responses" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "The prompt templates used to generate LLM responses are illustrated in Figures 4 to 7. 
Each template consists of four fields that need to be populated: \"task_type\", \"task_description\", \"examples\", and" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 172 + ], + "type": "text", + "content": "\"question\". The \"task_type\" and \"task_description\" fields are determined based on the type of question. For instance, for questions from the GPQA dataset, \"task_type\" is set to \"multidisciplinary question\", and \"task_description\" is set to \"Please choose the answer from options A to D, corresponding to the question.\" During dataset preprocessing, we design appropriate \"task_type\" and \"task_description\" values for each dataset. The \"examples\" field is filled according to the selected prompting strategy, either 0-shot or 5-shot. In the 0-shot setting, this field is left empty, while in the 5-shot setting, it is populated with five example question-answer pairs that are similar to the target \"question\". The \"question\" field contains the specific query to be answered by the LLM. Examples of the \"examples\" and \"question\" fields are shown in Figures 8 and 9, respectively." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "code", + "bbox": [ + 120, + 190, + 312, + 222 + ], + "blocks": [ + { + "bbox": [ + 120, + 190, + 312, + 222 + ], + "lines": [ + { + "bbox": [ + 120, + 190, + 312, + 222 + ], + "spans": [ + { + "bbox": [ + 120, + 190, + 312, + 222 + ], + "type": "text", + "content": "You are an expert in {task_type}, {task_description} \n{examples} \n{question}" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 186, + 242, + 423, + 255 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 242, + 423, + 255 + ], + "spans": [ + { + "bbox": [ + 186, + 242, + 423, + 255 + ], + "type": "text", + "content": "Figure 4: Few-shot prompt for generating LLM responses." + } + ] + } + ], + "index": 2, + "type": "text" + }, + { + "type": "code", + "bbox": [ + 120, + 279, + 317, + 331 + ], + "blocks": [ + { + "bbox": [ + 120, + 279, + 317, + 331 + ], + "lines": [ + { + "bbox": [ + 120, + 279, + 317, + 331 + ], + "spans": [ + { + "bbox": [ + 120, + 279, + 317, + 331 + ], + "type": "text", + "content": "You are an expert in {task_type}, {task_description} \n{examples} \n{question} \nEnd your final answer with 'The answer is ." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 171, + 350, + 439, + 363 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 350, + 439, + 363 + ], + "spans": [ + { + "bbox": [ + 171, + 350, + 439, + 363 + ], + "type": "text", + "content": "Figure 5: Few-shot-restrict prompt for generating LLM responses." 
+ } + ] + } + ], + "index": 4, + "type": "text" + }, + { + "type": "code", + "bbox": [ + 120, + 388, + 312, + 440 + ], + "blocks": [ + { + "bbox": [ + 120, + 388, + 312, + 440 + ], + "lines": [ + { + "bbox": [ + 120, + 388, + 312, + 440 + ], + "spans": [ + { + "bbox": [ + 120, + 388, + 312, + 440 + ], + "type": "text", + "content": "You are an expert in {task_type}, {task_description} \n{examples} \n{question} \nLet's think step by step." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 178, + 460, + 430, + 472 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 460, + 430, + 472 + ], + "spans": [ + { + "bbox": [ + 178, + 460, + 430, + 472 + ], + "type": "text", + "content": "Figure 6: Few-shot-cot prompt for generating LLM responses." + } + ] + } + ], + "index": 6, + "type": "text" + }, + { + "bbox": [ + 105, + 491, + 270, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 491, + 270, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 270, + 502 + ], + "type": "text", + "content": "D.2 Prompts for GPT-4o Annotation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 511, + 504, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 511, + 504, + 556 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 504, + 556 + ], + "type": "text", + "content": "The prompt templates used for annotating the collected LLM question-answer pairs with GPT-4o during the construction of the VAR dataset are shown in Figures 10 and 11. Both of these prompt templates employ the Chain-of-Thought (CoT) strategy to ensure the accuracy of the annotations generated by GPT-4o." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 569, + 270, + 581 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 569, + 270, + 581 + ], + "spans": [ + { + "bbox": [ + 105, + 569, + 270, + 581 + ], + "type": "text", + "content": "D.3 Prompts for Data Augmentation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 589, + 506, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 589, + 506, + 612 + ], + "spans": [ + { + "bbox": [ + 104, + 589, + 506, + 612 + ], + "type": "text", + "content": "In constructing the VAR dataset, two prompt templates used to guide GPT-4o in augmenting mathematical question samples are presented in Figures 12 and 13." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 624, + 242, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 624, + 242, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 242, + 636 + ], + "type": "text", + "content": "D.4 Prompts for Judge Model" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "type": "text", + "content": "In the experiments of this paper, the prompts used for all judge models were constructed based on the official templates provided by their respective developers. However, for some judge models, the official prompt templates were not fully compatible with the evaluation tasks in this paper, so other similar prompt templates were used. 
Specifically, Figure 14 shows the prompt template used by GPT-4o as Judge, Figure 15 shows the prompt template used by GPT-4o as Judge (CoT), Figure 16 shows the prompt template used by JudgeLM series models and PandaLM-7B-v1, Figure 17 shows the prompt template used by Auto-J series models, and Figure 18 shows the prompt template used" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 120, + 104, + 312, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 104, + 312, + 136 + ], + "spans": [ + { + "bbox": [ + 120, + 104, + 312, + 136 + ], + "type": "text", + "content": "You are an expert in {task_type}, {task_description} \n{examples} \n{question}" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 121, + 144, + 208, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 144, + 208, + 156 + ], + "spans": [ + { + "bbox": [ + 121, + 144, + 208, + 156 + ], + "type": "text", + "content": "Let's think step by step." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 164, + 317, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 164, + 317, + 175 + ], + "spans": [ + { + "bbox": [ + 121, + 164, + 317, + 175 + ], + "type": "text", + "content": "End your final answer with 'The answer is ." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 163, + 195, + 446, + 207 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 163, + 195, + 446, + 207 + ], + "spans": [ + { + "bbox": [ + 163, + 195, + 446, + 207 + ], + "type": "text", + "content": "Figure 7: Few-shot-cot-restrict prompt for generating LLM responses." + } + ] + } + ], + "index": 3, + "type": "text" + }, + { + "bbox": [ + 121, + 273, + 267, + 283 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 273, + 267, + 283 + ], + "spans": [ + { + "bbox": [ + 121, + 273, + 267, + 283 + ], + "type": "text", + "content": "***** Start In-Context Examples ****" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 284, + 489, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 284, + 489, + 343 + ], + "spans": [ + { + "bbox": [ + 121, + 284, + 489, + 343 + ], + "type": "text", + "content": "Q: A late game rally by Washington led them to the Eagles' 26 yard line. A shot to the end zone by Robert Griffin III would be intercepted by Brandon Boykin, clinching an Eagles win. The Eagles would move to 6-5. This is the Eagles first win at Lincoln Financial Field since Week 4 of the 2012 season, because prior to this game, the Eagles had never won a game in their home stadium in 414 days since that same week, snapping a 10-game losing streak at home with this win. How many more wins than losses did the Eagles have after this game?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 344, + 193, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 344, + 193, + 353 + ], + "spans": [ + { + "bbox": [ + 121, + 344, + 193, + 353 + ], + "type": "text", + "content": "A: The answer is 1." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 363, + 489, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 363, + 489, + 423 + ], + "spans": [ + { + "bbox": [ + 121, + 363, + 489, + 423 + ], + "type": "text", + "content": "Q: The population of Sevastopol proper is 418,987 (01.01.16), making it the largest in the Crimean Peninsula. The city's agglomeration has about 600,000 people (2015). According to the Ukrainian Census (2001), the ethnic groups of Sevastopol include Russians (71.6%), Ukrainians (22.4%), Belarusians (1.6%), Tatars (0.7%), Crimean Tatars (0.5%), Armenians (0.3%), Jews (0.3%), Moldovans (0.2%), and Azerbaijani people (0.2%). Which ethnic has a higher percentage of the population in Sevastopol: Russians or Armenians?" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 424, + 220, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 424, + 220, + 433 + ], + "spans": [ + { + "bbox": [ + 121, + 424, + 220, + 433 + ], + "type": "text", + "content": "A: The answer is Russians." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 443, + 489, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 443, + 489, + 512 + ], + "spans": [ + { + "bbox": [ + 121, + 443, + 489, + 512 + ], + "type": "text", + "content": "Q: the most common crimes in the ACT are property related crimes, unlawful entry with intent and motor vehicle theft. They affected 2,304 and 966 people (580 and 243 per 100,000 persons respectively). Homicide and related offences—murder, attempted murder and manslaughter, but excluding driving causing death and conspiracy to murder—affect 1.0 per 100,000 persons, which is below the national average of 1.9 per 100,000. Rates of sexual assault (64.4 per 100,000 persons) are also below the national average (98.5 per 100,000). Which was there a higher national average for, homicide and related offences or sexual assault?" 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 514, + 238, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 514, + 238, + 522 + ], + "spans": [ + { + "bbox": [ + 121, + 514, + 238, + 522 + ], + "type": "text", + "content": "A: The answer is sexual assault." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 121, + 533, + 489, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 533, + 489, + 572 + ], + "spans": [ + { + "bbox": [ + 121, + 533, + 489, + 572 + ], + "type": "text", + "content": "Q: In the county, the population was spread out with " + }, + { + "bbox": [ + 121, + 533, + 489, + 572 + ], + "type": "inline_equation", + "content": "21.7\\%" + }, + { + "bbox": [ + 121, + 533, + 489, + 572 + ], + "type": "text", + "content": " under the age of 18, " + }, + { + "bbox": [ + 121, + 533, + 489, + 572 + ], + "type": "inline_equation", + "content": "8.5\\%" + }, + { + "bbox": [ + 121, + 533, + 489, + 572 + ], + "type": "text", + "content": " from 18 to 24, " + }, + { + "bbox": [ + 121, + 533, + 489, + 572 + ], + "type": "inline_equation", + "content": "26.9\\%" + }, + { + "bbox": [ + 121, + 533, + 489, + 572 + ], + "type": "text", + "content": " from 25 to 44, " + }, + { + "bbox": [ + 121, + 533, + 489, + 572 + ], + "type": "inline_equation", + "content": "27.7\\%" + }, + { + "bbox": [ + 121, + 533, + 489, + 572 + ], + "type": "text", + "content": " from 45 to 64, and " + }, + { + "bbox": [ + 121, + 533, + 489, + 572 + ], + "type": "inline_equation", + "content": "15.0\\%" + }, + { + "bbox": [ + 121, + 533, + 489, + 572 + ], + "type": "text", + "content": " who were 65 years of age or older. The median age was 40 years. For every 100 females, there were 94.4 males. For every 100 females age 18 and over, there were 98.7 males. How many percent were not from 45 to 64?" 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 121, + 573, + 204, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 573, + 204, + 582 + ], + "spans": [ + { + "bbox": [ + 121, + 573, + 204, + 582 + ], + "type": "text", + "content": "A: The answer is 72.3." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "spans": [ + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "text", + "content": "Q: The median age in the city was 35.1 years. " + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "inline_equation", + "content": "24.2\\%" + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "text", + "content": " of residents were under the age of 18; " + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "inline_equation", + "content": "7.9\\%" + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "text", + "content": " were between the ages of 18 and 24; " + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "inline_equation", + "content": "33.8\\%" + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "text", + "content": " were from 25 to 44; " + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "inline_equation", + "content": "24.6\\%" + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "text", + "content": " were from 45 to 64; and " + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "inline_equation", + "content": "9.5\\%" + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "text", + "content": " were 65 years of age or older. 
The gender makeup of the city was " + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "inline_equation", + "content": "48.6\\%" + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "text", + "content": " male and " + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "inline_equation", + "content": "51.4\\%" + }, + { + "bbox": [ + 121, + 592, + 489, + 642 + ], + "type": "text", + "content": " females. How many more people, in terms of percentage, were in the largest age group compared to the second smallest?" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 643, + 204, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 643, + 204, + 651 + ], + "spans": [ + { + "bbox": [ + 121, + 643, + 204, + 651 + ], + "type": "text", + "content": "A: The answer is 24.3." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 121, + 652, + 264, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 652, + 264, + 662 + ], + "spans": [ + { + "bbox": [ + 121, + 652, + 264, + 662 + ], + "type": "text", + "content": "***** End In-Context Examples ****" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 223, + 683, + 386, + 695 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 683, + 386, + 695 + ], + "spans": [ + { + "bbox": [ + 223, + 683, + 386, + 695 + ], + "type": "text", + "content": "Figure 8: Example of \"examples\" fields." 
+ } + ] + } + ], + "index": 16, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "spans": [ + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "content": "Q: Let " + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "inline_equation", + "content": "ABCD" + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "content": " be a tetrahedron such that " + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "inline_equation", + "content": "AB = CD = \\sqrt{41}" + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "inline_equation", + "content": "AC = BD = \\sqrt{80}" + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "inline_equation", + "content": "BC = AD = \\sqrt{89}" + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "content": ". 
There exists a point " + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "content": " inside the tetrahedron such that the distances from " + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "content": " to each of the faces of the tetrahedron are all equal. This distance can be written in the form " + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "inline_equation", + "content": "\\frac{m\\sqrt{n}}{p}" + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "inline_equation", + "content": "m, n" + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "content": " are positive integers, " + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "content": " are relatively prime, and " + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "content": " is not divisible by the square of any prime. Find " + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "inline_equation", + "content": "m + n + p" + }, + { + "bbox": [ + 120, + 79, + 489, + 135 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 121, + 136, + 132, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 136, + 132, + 144 + ], + "spans": [ + { + "bbox": [ + 121, + 136, + 132, + 144 + ], + "type": "text", + "content": "A:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 225, + 163, + 384, + 176 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 163, + 384, + 176 + ], + "spans": [ + { + "bbox": [ + 225, + 163, + 384, + 176 + ], + "type": "text", + "content": "Figure 9: Example of \"question\" fields." + } + ] + } + ], + "index": 2, + "type": "text" + }, + { + "bbox": [ + 120, + 196, + 489, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 196, + 489, + 218 + ], + "spans": [ + { + "bbox": [ + 120, + 196, + 489, + 218 + ], + "type": "text", + "content": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. Think step by step as you make your evaluation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 119, + 227, + 489, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 227, + 489, + 258 + ], + "spans": [ + { + "bbox": [ + 119, + 227, + 489, + 258 + ], + "type": "text", + "content": "You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. Think step by step and respond with either [Correct] or [Incorrect]." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 266, + 206, + 276 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 266, + 206, + 276 + ], + "spans": [ + { + "bbox": [ + 121, + 266, + 206, + 276 + ], + "type": "text", + "content": "Special considerations:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 277, + 489, + 367 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 121, + 277, + 489, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 277, + 489, + 307 + ], + "spans": [ + { + "bbox": [ + 121, + 277, + 489, + 307 + ], + "type": "text", + "content": "1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 307, + 489, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 307, + 489, + 327 + ], + "spans": [ + { + "bbox": [ + 121, + 307, + 489, + 327 + ], + "type": "text", + "content": "2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 327, + 489, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 327, + 489, + 346 + ], + "spans": [ + { + "bbox": [ + 121, + 327, + 489, + 346 + ], + "type": "text", + "content": "3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 346, + 489, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 346, + 489, + 367 + ], + "spans": [ + { + "bbox": [ + 121, + 346, + 489, + 367 + ], + "type": "text", + "content": "4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "type": "code", + "bbox": [ + 121, + 368, + 338, + 459 + ], + "blocks": [ + { + "bbox": [ + 121, + 368, + 338, + 459 + ], + "lines": [ + { + "bbox": [ + 121, + 368, + 338, + 459 + ], + "spans": [ + { + "bbox": [ + 121, + 368, + 338, + 459 + ], + "type": "text", + "content": "Please present your response in the following JSON format: { \"reasoning\": \"Your step-by-step reasoning here.\", \"judgment\": \"Correct or Incorrect\" } Question: \"\"{question}\"\" Output sentence: \"\"{output}\"\" Correct answer: {answer}" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_body" + } + ], + "index": 11, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 216, + 485, + 394, + 498 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 485, + 394, + 498 + ], + "spans": [ + { + "bbox": [ + 216, + 485, + 394, + 498 + ], + "type": "text", + "content": "Figure 10: Prompt I for GPT-4o annotation." + } + ] + } + ], + "index": 12, + "type": "text" + }, + { + "bbox": [ + 104, + 520, + 506, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 520, + 506, + 554 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 506, + 554 + ], + "type": "text", + "content": "by Prometheus series models. The official prompt template for the CompassJudger-1 series models corresponds to pairwise evaluation, so the prompt template used by this series is the same as that for the xVerify model, as shown in Figure 19." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 568, + 218, + 581 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 568, + 218, + 581 + ], + "spans": [ + { + "bbox": [ + 105, + 568, + 218, + 581 + ], + "type": "text", + "content": "D.5 Prompts for xVerify" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 590, + 506, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 506, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 506, + 635 + ], + "type": "text", + "content": "Figure 19 shows the prompt template used to construct the input for the xVerify model. This template is used both for training and evaluation of the xVerify model. Specifically, \"question,\" \"output,\" and \"answer\" correspond to the question content, the LLM response, and the reference answer, respectively." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 652, + 320, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 652, + 320, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 652, + 320, + 666 + ], + "type": "text", + "content": "E Supplementary Experimental Results" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 678, + 349, + 690 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 678, + 349, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 349, + 690 + ], + "type": "text", + "content": "E.1 Evaluation Accuracy Results of All xVerify Models" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 700, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 505, + 723 + ], + "type": "text", + "content": "Tables 19 and 20 present the performance of all " + }, + { + "bbox": [ + 104, + 700, + 505, + 723 + ], + "type": "inline_equation", + "content": "14\\mathrm{x}" + }, + { + "bbox": [ + 104, + 700, + 505, + 723 + ], 
+ "type": "text", + "content": " Verify models on the test set and the generalization set, respectively. Overall, each xVerify model achieves an F1 score and accuracy exceeding " + }, + { + "bbox": [ + 104, + 700, + 505, + 723 + ], + "type": "inline_equation", + "content": "96.5\\%" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 120, + 79, + 489, + 100 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 79, + 489, + 100 + ], + "spans": [ + { + "bbox": [ + 120, + 79, + 489, + 100 + ], + "type": "text", + "content": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. Think step by step as you make your evaluation." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 119, + 109, + 489, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 109, + 489, + 129 + ], + "spans": [ + { + "bbox": [ + 119, + 109, + 489, + 129 + ], + "type": "text", + "content": "We request your feedback on whether the model's response correctly answers the user question above. Follow these steps to make your evaluation:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 129, + 488, + 179 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 121, + 129, + 321, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 129, + 321, + 139 + ], + "spans": [ + { + "bbox": [ + 121, + 129, + 321, + 139 + ], + "type": "text", + "content": "1. Think step by step: Read the user question carefully." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 140, + 451, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 140, + 451, + 148 + ], + "spans": [ + { + "bbox": [ + 121, + 140, + 451, + 148 + ], + "type": "text", + "content": "2. Think step by step: Review the reference answer and understand the key points it covers." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 149, + 405, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 149, + 405, + 159 + ], + "spans": [ + { + "bbox": [ + 121, + 149, + 405, + 159 + ], + "type": "text", + "content": "3. Think step by step: Compare the model's answer with the reference answer." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 159, + 488, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 159, + 488, + 179 + ], + "spans": [ + { + "bbox": [ + 121, + 159, + 488, + 179 + ], + "type": "text", + "content": "4. Think step by step: Determine if the model's answer addresses the key points in the reference answer and correctly answers the question." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 119, + 188, + 489, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 188, + 489, + 209 + ], + "spans": [ + { + "bbox": [ + 119, + 188, + 489, + 209 + ], + "type": "text", + "content": "First, provide your reasoning in detail. 
Then, clearly state your judgment as either \"Correct\" or \"Incorrect.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "code", + "bbox": [ + 120, + 209, + 338, + 300 + ], + "blocks": [ + { + "bbox": [ + 120, + 209, + 338, + 300 + ], + "lines": [ + { + "bbox": [ + 120, + 209, + 338, + 300 + ], + "spans": [ + { + "bbox": [ + 120, + 209, + 338, + 300 + ], + "type": "text", + "content": "Please present your response in the following JSON format: \n{ \"reasoning\": \"Your step-by-step reasoning here.\", \"judgment\": \"Correct or Incorrect\" \n} \nQuestion: {question} \nReference Answer: {answer} \nModel's Answer: {output}" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 214, + 319, + 394, + 331 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 319, + 394, + 331 + ], + "spans": [ + { + "bbox": [ + 214, + 319, + 394, + 331 + ], + "type": "text", + "content": "Figure 11: Prompt II for GPT-4o annotation." + } + ] + } + ], + "index": 9, + "type": "text" + }, + { + "bbox": [ + 104, + 354, + 504, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 354, + 504, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 504, + 376 + ], + "type": "text", + "content": "on the test set and over " + }, + { + "bbox": [ + 104, + 354, + 504, + 376 + ], + "type": "inline_equation", + "content": "95.52\\%" + }, + { + "bbox": [ + 104, + 354, + 504, + 376 + ], + "type": "text", + "content": " on the generalization set. These results demonstrate not only the effectiveness of the xVerify models for evaluation tasks but also the high quality of the VAR dataset." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 380, + 504, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 504, + 459 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 504, + 459 + ], + "type": "text", + "content": "A comparison between the results on the two datasets shows that the performance on the generalization set experiences a slight decline relative to the test set, with the decrease not exceeding " + }, + { + "bbox": [ + 104, + 380, + 504, + 459 + ], + "type": "inline_equation", + "content": "1.6\\%" + }, + { + "bbox": [ + 104, + 380, + 504, + 459 + ], + "type": "text", + "content": ". Moreover, models with larger parameter sizes exhibit smaller performance drops. This indicates that the xVerify models possess strong generalization capabilities, which further improve with an increase in parameter scale. Additionally, it is observed across both datasets that while the performance of xVerify models generally enhances with the increment of parameter size, beyond a certain threshold, further increases in parameter scale do not lead to additional performance gains." + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 107, + 498, + 503, + 644 + ], + "blocks": [ + { + "bbox": [ + 104, + 476, + 504, + 498 + ], + "lines": [ + { + "bbox": [ + 104, + 476, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 476, + 504, + 498 + ], + "type": "text", + "content": "Table 19: Evaluation Accuracy Results on the Test Set: All xVerify Models. The best performance in each column is shown in **bold**, and the second-best performance is underlined." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 498, + 503, + 644 + ], + "lines": [ + { + "bbox": [ + 107, + 498, + 503, + 644 + ], + "spans": [ + { + "bbox": [ + 107, + 498, + 503, + 644 + ], + "type": "table", + "html": "
xVerify ModelMultiple ChoiceMathShort AnswerClassificationTotal
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
xVerify-0.5B-I97.78%97.90%93.74%94.64%96.72%97.49%99.71%99.59%96.69%96.85%
xVerify-1B-I97.22%97.35%94.76%95.45%96.06%96.97%99.71%99.59%96.77%96.91%
xVerify-1.5B-I97.85%97.96%95.10%95.75%96.05%96.97%99.63%99.49%97.05%97.17%
xVerify-2B-I97.93%98.02%95.06%95.71%96.06%96.97%99.78%99.69%97.09%97.21%
xVerify-3B-Ia97.73%97.84%95.00%95.67%96.17%97.06%99.71%99.59%97.02%97.14%
xVerify-3B-Ib97.31%97.41%95.65%96.18%96.38%97.23%99.78%99.69%97.17%97.27%
xVerify-7B-I97.75%97.84%95.94%96.44%96.51%97.32%99.78%99.69%97.41%97.50%
xVerify-8B-I97.92%98.02%95.34%95.97%96.05%96.97%99.71%99.59%97.17%97.29%
xVerify-9B-C98.29%98.38%95.26%95.88%96.06%96.97%99.78%99.69%97.25%97.37%
xVerify-9B-I97.43%97.53%95.75%96.27%96.06%96.97%99.78%99.69%97.19%97.29%
xVerify-14B-Ia97.49%97.59%95.73%96.22%95.41%96.46%99.63%99.49%97.06%97.16%
xVerify-14B-Ib97.67%97.78%96.10%96.57%95.74%96.72%99.71%99.59%97.31%97.40%
xVerify-27B-I97.81%97.90%95.46%96.01%96.19%97.06%99.56%99.38%97.15%97.26%
xVerify-32B-I97.81%97.90%95.88%96.31%96.18%97.06%99.71%99.59%97.32%97.40%
", + "image_path": "af8a758a9ce34e27bf2d1dc172a720e7d29f1449eb7ef7443c3396698c925eb8.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 667, + 460, + 680 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 460, + 680 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 460, + 680 + ], + "type": "text", + "content": "E.2 Computational Efficiency and Operational Cost of xVerify and Judge Models" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "Table 21 displays the running time performance of the xVerify model and other judge models. Each model was evaluated using 200 randomly selected samples per question type from the generalization set, with running times measured in seconds. This data provides insights into the computational" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 170, + 487, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 170, + 487, + 200 + ], + "spans": [ + { + "bbox": [ + 121, + 170, + 487, + 200 + ], + "type": "text", + "content": "You are an expert in mathematical calculations and data expressions. You are required to provide different equivalent forms of the standard answer for the following math problem." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 121, + 201, + 212, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 201, + 212, + 210 + ], + "spans": [ + { + "bbox": [ + 121, + 201, + 212, + 210 + ], + "type": "text", + "content": "Problem: {question}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 210, + 198, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 210, + 198, + 220 + ], + "spans": [ + { + "bbox": [ + 121, + 210, + 198, + 220 + ], + "type": "text", + "content": "Answer: {answer}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 230, + 169, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 230, + 169, + 239 + ], + "spans": [ + { + "bbox": [ + 121, + 230, + 169, + 239 + ], + "type": "text", + "content": "Example 1:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 240, + 490, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 240, + 490, + 270 + ], + "spans": [ + { + "bbox": [ + 121, + 240, + 490, + 270 + ], + "type": "text", + "content": " Problem: Let $ \\alpha \\beta \\gamma be the radian measure of the smallest angle in a $3-4-5$ right triangle. Let $ \\alpha \\beta \\gamma be the radian measure of the smallest angle in a $7-24-25$ right triangle. Express $ \\alpha \\beta \\gamma in terms of $ \\alpha \\beta \\gamma$." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 270, + 279, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 270, + 279, + 280 + ], + "spans": [ + { + "bbox": [ + 121, + 270, + 279, + 280 + ], + "type": "text", + "content": " Answer: \\\\frac{\\backslashpi}{2} - 2\\alpha" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 281, + 155, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 281, + 155, + 289 + ], + "spans": [ + { + "bbox": [ + 121, + 281, + 155, + 289 + ], + "type": "text", + "content": "Output:" + } + ] + } + ], + "index": 6 + }, + { + "type": "code", + "bbox": [ + 123, + 290, + 320, + 340 + ], + "blocks": [ + { + "bbox": [ + 123, + 290, + 320, + 340 + ], + "lines": [ + { + "bbox": [ + 123, + 290, + 320, + 340 + ], + "spans": [ + { + "bbox": [ + 123, + 290, + 320, + 340 + ], + "type": "text", + "content": "\"\\\"json {\n \"answer1\": \"\\\"\\pi/2 - 2\\alpha\", \n \"answer2\": \"pi/2 - 2\\alpha\", \n \"answer3\": \"pi/2 - 2 * \\alpha\", \n \"answer4\": \"0.5 * \\pi - 2 * \\alpha\"\n}\");" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + "sub_type": "code", + "guess_lang": "javascript" + }, + { + "bbox": [ + 121, + 350, + 169, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 350, + 169, + 359 + ], + "spans": [ + { + "bbox": [ + 121, + 350, + 169, + 359 + ], + "type": "text", + "content": "Example 2:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 360, + 495, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 360, + 495, + 399 + ], + "spans": [ + { + "bbox": [ + 121, + 360, + 495, + 399 + ], + "type": "text", + "content": "Problem: A volcano erupts and spews ash into the sky. The ash cloud spreads out in a diameter eighteen times as far as the distance it shot up into the sky. 
If the ashes erupted three hundred feet into the sky, what was the radius of the ash cloud in feet?" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 400, + 179, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 400, + 179, + 408 + ], + "spans": [ + { + "bbox": [ + 121, + 400, + 179, + 408 + ], + "type": "text", + "content": "Answer: 2700" + } + ] + } + ], + "index": 10 + }, + { + "type": "code", + "bbox": [ + 121, + 410, + 348, + 479 + ], + "blocks": [ + { + "bbox": [ + 121, + 410, + 348, + 479 + ], + "lines": [ + { + "bbox": [ + 121, + 410, + 348, + 479 + ], + "spans": [ + { + "bbox": [ + 121, + 410, + 348, + 479 + ], + "type": "text", + "content": "Output:\n```\n\"\\\"json {\n \"answer1\": \"2.7×10^3\",\n \"answer2\": \"2700.0\",\n \"answer3\": \"2.7 \\times times 10^3\",\n \"answer4\": \"$2.7 \\times times 10^3$\",\n \"answer5\": \"Two thousand seven hundred\"}''" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_body" + } + ], + "index": 11, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 121, + 489, + 179, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 489, + 179, + 498 + ], + "spans": [ + { + "bbox": [ + 121, + 489, + 179, + 498 + ], + "type": "text", + "content": "Please note:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 121, + 499, + 431, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 499, + 431, + 509 + ], + "spans": [ + { + "bbox": [ + 121, + 499, + 431, + 509 + ], + "type": "text", + "content": "1. You need to provide 3 to 5 different standard forms of the answer" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 510, + 471, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 510, + 471, + 528 + ], + "spans": [ + { + "bbox": [ + 121, + 510, + 471, + 528 + ], + "type": "text", + "content": "2. 
Each different form must be equivalent to the standard answer, i.e., it should still be a correct and valid answer." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 121, + 529, + 466, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 529, + 466, + 548 + ], + "spans": [ + { + "bbox": [ + 121, + 529, + 466, + 548 + ], + "type": "text", + "content": "3. You may use LaTeX, scientific notation, or other standard mathematical expressions." + } + ] + } + ], + "index": 15 + }, + { + "type": "code", + "bbox": [ + 121, + 549, + 410, + 588 + ], + "blocks": [ + { + "bbox": [ + 121, + 549, + 410, + 588 + ], + "lines": [ + { + "bbox": [ + 121, + 549, + 410, + 588 + ], + "spans": [ + { + "bbox": [ + 121, + 549, + 410, + 588 + ], + "type": "text", + "content": "4. Please follow the JSON format below for the output:\n```\n\"\\\"json {\n \"answer1\": \"xxx\", \"answer2\": \"xxx\", \"answer3\": \"xxx\", ...\n}...\"" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "code_body" + } + ], + "index": 16, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 171, + 617, + 438, + 629 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 617, + 438, + 629 + ], + "spans": [ + { + "bbox": [ + 171, + 617, + 438, + 629 + ], + "type": "text", + "content": "Figure 12: Prompt for Generating Alternative Reference Answers." 
+ } + ] + } + ], + "index": 17, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 104, + 170, + 506, + 604 + ], + "blocks": [ + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "lines": [ + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "text", + "content": "You are an expert in mathematical calculations and data expressions. For an answer to a specific mathematical problem, you are required to provide equivalent and different expressions of the mathematical result. Answer: {output} \nExample 1: Answer: The answer is " + }, + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "inline_equation", + "content": "\\beta = \\backslash" + }, + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "text", + "content": " frac{pi{2}-2\\alpha}. Output: \"'json { \"answer1\": \"The answer is " + }, + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "inline_equation", + "content": "\\backslash" + }, + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "text", + "content": " pi/2 - 2\\alpha}. , \"answer2\": \"The answer is pi/2 - 2\\alpha}. , \"answer3\": \"The answer is pi/2 - 2\\* alpha.\", \"answer4\": \"The answer is " + }, + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "inline_equation", + "content": "0.5*" + }, + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "text", + "content": " pi-2\\* alpha.\" }\"\" \nExample 2: Answer: The answer is 2700 feet. 
Output: \"'json { \"answer1\": \"The answer is " + }, + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "inline_equation", + "content": "2.7\\times 10^{-3}" + }, + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "text", + "content": " feet.\", \"answer2\": \"The answer is 2700.0 feet.\", \"answer3\": \"The answer is 2.7 times " + }, + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "inline_equation", + "content": "10^{-3}" + }, + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "text", + "content": " feet.\", \"answer4\": \"The answer is " + }, + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "inline_equation", + "content": "\\$ 2.7" + }, + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "text", + "content": " times " + }, + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "inline_equation", + "content": "10^{-3}\\{3\\}" + }, + { + "bbox": [ + 104, + 170, + 506, + 604 + ], + "type": "text", + "content": " feet.\", \"answer5\": \"The answer is Two thousand seven hundred feet.\" }\"\" \nPlease note: 1. You need to provide 3 to 5 different expressions, each replacing the mathematical result with an equivalent and different form. 2. Each expression must be exactly equivalent to the target answer to ensure its correctness. 3. You can use LaTeX, scientific notation, or other standard mathematical formats. 4. 
Please output the result in the following JSON format: \"'json { \"answer1\": \"The answer is xxx\", \"answer2\": \"The answer is xxx\", \"answer3\": \"The answer is xxx\", \"answer4\": \"The answer is xxx\", \"answer5\": \"The answer is xxx\" }\"\"" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + } + ], + "index": 0, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 164, + 612, + 444, + 625 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 612, + 444, + 625 + ], + "spans": [ + { + "bbox": [ + 164, + 612, + 444, + 625 + ], + "type": "text", + "content": "Figure 13: Prompt for Generating Diverse Final Answer Expressions." + } + ] + } + ], + "index": 1, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 120, + 102, + 489, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 102, + 489, + 144 + ], + "spans": [ + { + "bbox": [ + 120, + 102, + 489, + 144 + ], + "type": "text", + "content": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. Respond with either [Correct] or [Incorrect]." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 121, + 152, + 206, + 162 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 152, + 206, + 162 + ], + "spans": [ + { + "bbox": [ + 121, + 152, + 206, + 162 + ], + "type": "text", + "content": "Special considerations:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 163, + 489, + 252 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 121, + 163, + 488, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 163, + 488, + 193 + ], + "spans": [ + { + "bbox": [ + 121, + 163, + 488, + 193 + ], + "type": "text", + "content": "1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 193, + 489, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 193, + 489, + 213 + ], + "spans": [ + { + "bbox": [ + 121, + 193, + 489, + 213 + ], + "type": "text", + "content": "2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 213, + 488, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 213, + 488, + 232 + ], + "spans": [ + { + "bbox": [ + 121, + 213, + 488, + 232 + ], + "type": "text", + "content": "3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 233, + 488, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 233, + 488, + 252 + ], + "spans": [ + { + "bbox": [ + 121, + 233, + 488, + 252 + ], + "type": "text", + "content": "4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 253, + 338, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 253, + 338, + 262 + ], + "spans": [ + { + "bbox": [ + 121, + 253, + 338, + 262 + ], + "type": "text", + "content": "Please present your response in the following JSON format:" + } + ] + } + ], + "index": 7 + }, + { + "type": "code", + "bbox": [ + 121, + 263, + 272, + 294 + ], + "blocks": [ + { + "bbox": [ + 121, + 263, + 272, + 294 + ], + "lines": [ + { + "bbox": [ + 121, + 263, + 272, + 294 + ], + "spans": [ + { + "bbox": [ + 121, + 263, + 272, + 294 + ], + "type": "text", + "content": "{\" judgement\": \"Correct or Incorrect\"}" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "code", + "guess_lang": "json" + }, + { + "bbox": [ + 121, + 301, + 222, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 301, + 222, + 312 + ], + "spans": [ + { + "bbox": [ + 121, + 301, + 222, + 312 + ], + "type": "text", + "content": "Question: \"\"{question}\"\"" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 312, + 248, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 312, + 248, + 322 + ], + "spans": [ + { + "bbox": [ + 121, + 312, + 248, + 322 + ], + "type": "text", + "content": "Output sentence: \"\"{response}\"\"" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 121, + 323, + 225, + 332 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 121, + 323, + 225, + 332 + ], + "spans": [ + { + "bbox": [ + 121, + 323, + 225, + 332 + ], + "type": "text", + "content": "Correct answer: {reference}" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 223, + 352, + 386, + 365 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 352, + 386, + 365 + ], + "spans": [ + { + "bbox": [ + 223, + 352, + 386, + 365 + ], + "type": "text", + "content": "Figure 14: Prompt for GPT-4o as Judge." + } + ] + } + ], + "index": 12, + "type": "text" + }, + { + "bbox": [ + 120, + 425, + 489, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 425, + 489, + 467 + ], + "spans": [ + { + "bbox": [ + 120, + 425, + 489, + 467 + ], + "type": "text", + "content": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. Think step by step as you make your evaluation. You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. Think step by step and respond with either [Correct] or [Incorrect]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 475, + 206, + 485 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 475, + 206, + 485 + ], + "spans": [ + { + "bbox": [ + 121, + 475, + 206, + 485 + ], + "type": "text", + "content": "Special considerations:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 121, + 486, + 489, + 575 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 121, + 486, + 488, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 486, + 488, + 515 + ], + "spans": [ + { + "bbox": [ + 121, + 486, + 488, + 515 + ], + "type": "text", + "content": "1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. 
In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect]." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 121, + 516, + 489, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 516, + 489, + 536 + ], + "spans": [ + { + "bbox": [ + 121, + 516, + 489, + 536 + ], + "type": "text", + "content": "2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct]." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 121, + 536, + 488, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 536, + 488, + 555 + ], + "spans": [ + { + "bbox": [ + 121, + 536, + 488, + 555 + ], + "type": "text", + "content": "3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 121, + 555, + 488, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 555, + 488, + 575 + ], + "spans": [ + { + "bbox": [ + 121, + 555, + 488, + 575 + ], + "type": "text", + "content": "4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 576, + 338, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 576, + 338, + 586 + ], + "spans": [ + { + "bbox": [ + 121, + 576, + 338, + 586 + ], + "type": "text", + "content": "Please present your response in the following JSON format:" + } + ] + } + ], + "index": 20 + }, + { + "type": "code", + "bbox": [ + 121, + 586, + 320, + 628 + ], + "blocks": [ + { + "bbox": [ + 121, + 586, + 320, + 628 + ], + "lines": [ + { + "bbox": [ + 121, + 586, + 320, + 628 + ], + "spans": [ + { + "bbox": [ + 121, + 586, + 320, + 628 + ], + "type": "text", + "content": "\"reasoning\": \"Your step-by-step reasoning here.\", \"judgement\": \"Correct or Incorrect\"" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "code_body" + } + ], + "index": 21, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 121, + 634, + 222, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 634, + 222, + 645 + ], + "spans": [ + { + "bbox": [ + 121, + 634, + 222, + 645 + ], + "type": "text", + "content": "Question: \"\"{question}\"\"" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 121, + 645, + 248, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 645, + 248, + 655 + ], + "spans": [ + { + "bbox": [ + 121, + 645, + 248, + 655 + ], + "type": "text", + "content": "Output sentence: \"\"{response}\"" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 121, + 655, + 225, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 655, + 225, + 665 + ], + "spans": [ + { + "bbox": [ + 121, + 655, + 225, + 665 + ], + "type": "text", + "content": "Correct answer: {reference}" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 209, + 685, + 400, + 697 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 685, + 400, + 697 + ], + "spans": [ + { + "bbox": [ + 209, + 685, + 400, + 697 + 
], + "type": "text", + "content": "Figure 15: Prompt for GPT-4o as Judge (CoT)." + } + ] + } + ], + "index": 25, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 120, + 78, + 489, + 241 + ], + "blocks": [ + { + "bbox": [ + 120, + 78, + 489, + 241 + ], + "lines": [ + { + "bbox": [ + 120, + 78, + 489, + 241 + ], + "spans": [ + { + "bbox": [ + 120, + 78, + 489, + 241 + ], + "type": "text", + "content": "You are a helpful and precise assistant for checking the quality of the answer. \n[Question] \n{question} \n[Reference Answer] \n{reference} \n[Model's Answer] \n{response} \n[System] \nWe would like to request your feedback on the performance of the model's response to the user question displayed above. \nBased on the reference answer, please rate the accuracy of the response. The model receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance. \nPlease first output a single line containing only the score. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias. \n```java\n```java\nYou are a helpful and precise assistant for checking the quality of the answer. 
\n[Question] \n{question} \n[Reference Answer] \n{reference} \n[Model's Answer] \n{response} \n[System]" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + } + ], + "index": 0, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 238, + 259, + 372, + 272 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 259, + 372, + 272 + ], + "spans": [ + { + "bbox": [ + 238, + 259, + 372, + 272 + ], + "type": "text", + "content": "Figure 16: Prompt for JudgeLM." + } + ] + } + ], + "index": 1, + "type": "text" + }, + { + "type": "code", + "bbox": [ + 121, + 294, + 489, + 316 + ], + "blocks": [ + { + "bbox": [ + 121, + 294, + 489, + 316 + ], + "lines": [ + { + "bbox": [ + 121, + 294, + 489, + 316 + ], + "spans": [ + { + "bbox": [ + 121, + 294, + 489, + 316 + ], + "type": "text", + "content": "[INST] Write critiques for a submitted response on a given user's query, incorporating the correct answer as a reference, and grade the response accordingly:" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 120, + 324, + 489, + 456 + ], + "blocks": [ + { + "bbox": [ + 120, + 324, + 489, + 456 + ], + "lines": [ + { + "bbox": [ + 120, + 324, + 489, + 456 + ], + "spans": [ + { + "bbox": [ + 120, + 324, + 489, + 456 + ], + "type": "text", + "content": "[BEGIN DATA] \n\\*\\*\\* \n[Query]: {question} \n\\*\\*\\* \n[Correct Answer]: {reference} \n\\*\\*\\* \n[Response]: {response} \n\\*\\*\\* \n[END DATA] \nWrite critiques for this response. After that, you should give a final rating for the response on a scale of 1 to 10 by strictly following this format: \"[rating]\", for example: \"Rating: [[5]]\". 
[/INST]" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "javascript" + }, + { + "bbox": [ + 243, + 474, + 366, + 487 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 243, + 474, + 366, + 487 + ], + "spans": [ + { + "bbox": [ + 243, + 474, + 366, + 487 + ], + "type": "text", + "content": "Figure 17: Prompt for Auto-J." + } + ] + } + ], + "index": 4, + "type": "text" + }, + { + "type": "table", + "bbox": [ + 108, + 529, + 503, + 675 + ], + "blocks": [ + { + "bbox": [ + 105, + 506, + 504, + 529 + ], + "lines": [ + { + "bbox": [ + 105, + 506, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 504, + 529 + ], + "type": "text", + "content": "Table 20: Evaluation Accuracy Results on the Generalization Set: All xVerify Models. The best performance in each column is shown in **bold**, and the second-best performance is **underlined**." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 529, + 503, + 675 + ], + "lines": [ + { + "bbox": [ + 108, + 529, + 503, + 675 + ], + "spans": [ + { + "bbox": [ + 108, + 529, + 503, + 675 + ], + "type": "table", + "html": "
xVerify ModelMultiple ChoiceMathShort AnswerClassificationTotal
F1Acc.F1Acc.F1Acc.F1Acc.F1Acc.
xVerify-0.5B-I96.49%96.10%80.00%91.94%96.95%97.00%99.03%98.53%95.29%95.53%
xVerify-1B-I96.10%95.66%82.45%92.51%97.32%97.35%98.92%98.37%95.43%95.62%
xVerify-1.5B-I96.76%96.38%83.58%93.12%97.46%97.49%98.88%98.29%95.85%96.03%
xVerify-2B-I96.27%95.82%82.11%92.51%97.60%97.63%98.98%98.45%95.57%95.75%
xVerify-3B-Ia96.44%95.99%86.10%94.25%97.31%97.35%99.03%98.53%96.11%96.27%
xVerify-3B-Ib96.21%95.71%86.20%94.15%97.60%97.63%99.03%98.53%96.08%96.23%
xVerify-7B-I96.16%95.66%87.86%94.87%97.45%97.49%98.93%98.37%96.22%96.37%
xVerify-8B-I96.67%96.27%86.76%94.61%97.45%97.49%99.03%98.53%96.33%96.49%
xVerify-9B-C97.00%96.66%87.08%94.71%97.45%97.49%98.98%98.45%96.45%96.61%
xVerify-9B-I96.06%95.55%87.47%94.76%97.53%97.56%99.13%98.68%96.23%96.38%
xVerify-14B-Ia96.11%95.60%90.20%95.74%97.32%97.35%99.13%98.68%96.53%96.65%
xVerify-14B-Ib96.35%95.88%87.88%94.92%97.45%97.49%98.93%98.37%96.30%96.44%
xVerify-27B-I96.01%95.49%85.64%93.99%97.32%97.35%99.13%98.68%95.93%96.09%
xVerify-32B-I96.22%95.71%90.09%95.59%97.32%97.35%99.03%98.53%96.50%96.60%
", + "image_path": "fe6fa443c497f26a8ddbc810733dddf78eeec3fa5aadf72694d2815fa363a742.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "efficiency of each model under uniform testing conditions, thereby facilitating a comparative analysis of their real-time processing capabilities and scalability in practical applications." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 79, + 489, + 100 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 79, + 489, + 100 + ], + "spans": [ + { + "bbox": [ + 121, + 79, + 489, + 100 + ], + "type": "text", + "content": "You are a fair judge assistant tasked with providing clear, objective feedback based on specific criteria, ensuring each assessment reflects the absolute standards set for performance.\"" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 121, + 101, + 201, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 101, + 201, + 110 + ], + "spans": [ + { + "bbox": [ + 121, + 101, + 201, + 110 + ], + "type": "text", + "content": "Task Description:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 110, + 488, + 130 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 110, + 488, + 130 + ], + "spans": [ + { + "bbox": [ + 121, + 110, + 488, + 130 + ], + "type": "text", + 
"content": "An instruction (might include an Input inside it), a response to evaluate, a reference answer that gets a score of 5, and a score rubric representing a evaluation criteria are given." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 130, + 488, + 191 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 121, + 130, + 488, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 130, + 488, + 150 + ], + "spans": [ + { + "bbox": [ + 121, + 130, + 488, + 150 + ], + "type": "text", + "content": "1. Write a detailed feedback that assess the quality of the response strictly based on the given score rubric, not evaluating in general." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 150, + 488, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 150, + 488, + 170 + ], + "spans": [ + { + "bbox": [ + 121, + 150, + 488, + 170 + ], + "type": "text", + "content": "2. After writing a feedback, write a score that is an integer between 1 and 5. You should refer to the score rubric." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 170, + 488, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 170, + 488, + 191 + ], + "spans": [ + { + "bbox": [ + 121, + 170, + 488, + 191 + ], + "type": "text", + "content": "3. The output format should look as follows: \"Feedback: (write a feedback for criteria) [RESULT] (an integer number between 1 and 5)\" 4. Please do not generate any other opening, closing, and explanations." 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 200, + 235, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 200, + 235, + 209 + ], + "spans": [ + { + "bbox": [ + 121, + 200, + 235, + 209 + ], + "type": "text", + "content": "The instruction to evaluate:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 210, + 160, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 210, + 160, + 221 + ], + "spans": [ + { + "bbox": [ + 121, + 210, + 160, + 221 + ], + "type": "text", + "content": "{question}" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 230, + 215, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 230, + 215, + 239 + ], + "spans": [ + { + "bbox": [ + 121, + 230, + 215, + 239 + ], + "type": "text", + "content": "##Response to evaluate:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 240, + 161, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 240, + 161, + 251 + ], + "spans": [ + { + "bbox": [ + 121, + 240, + 161, + 251 + ], + "type": "text", + "content": "{response}" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 121, + 259, + 241, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 259, + 241, + 270 + ], + "spans": [ + { + "bbox": [ + 121, + 259, + 241, + 270 + ], + "type": "text", + "content": "Reference Answer (Score 5):" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 121, + 270, + 164, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 270, + 164, + 281 + ], + "spans": [ + { + "bbox": [ + 121, + 270, + 164, + 281 + ], + "type": "text", + "content": "{reference}" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 121, + 290, + 190, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 290, + 190, + 300 + ], + "spans": [ + { + "bbox": [ + 121, + 290, + 190, + 300 
+ ], + "type": "text", + "content": "Score Rubrics:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 300, + 489, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 300, + 489, + 310 + ], + "spans": [ + { + "bbox": [ + 121, + 300, + 489, + 310 + ], + "type": "text", + "content": "[Does the model demonstrate logical and effective reasoning in its responses?]" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 121, + 310, + 489, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 310, + 489, + 329 + ], + "spans": [ + { + "bbox": [ + 121, + 310, + 489, + 329 + ], + "type": "text", + "content": "Score 1: The model's responses show a complete lack of logical reasoning, often resulting in irrelevant or nonsensical answers." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 121, + 329, + 489, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 329, + 489, + 350 + ], + "spans": [ + { + "bbox": [ + 121, + 329, + 489, + 350 + ], + "type": "text", + "content": "Score 2: The model occasionally shows signs of logical reasoning but generally struggles to provide coherent or relevant responses." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 121, + 350, + 489, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 350, + 489, + 369 + ], + "spans": [ + { + "bbox": [ + 121, + 350, + 489, + 369 + ], + "type": "text", + "content": "Score 3: The model usually demonstrates basic reasoning capabilities, though it may not consistently apply logical principles or fully resolve complex issues." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 121, + 369, + 489, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 369, + 489, + 389 + ], + "spans": [ + { + "bbox": [ + 121, + 369, + 489, + 389 + ], + "type": "text", + "content": "Score 4: The model frequently exhibits strong reasoning skills, effectively addressing complex questions with minor inconsistencies or errors." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 121, + 389, + 489, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 389, + 489, + 410 + ], + "spans": [ + { + "bbox": [ + 121, + 389, + 489, + 410 + ], + "type": "text", + "content": "Score 5: The model consistently demonstrates advanced reasoning abilities, providing logically sound, coherent, and sophisticated responses to complex queries." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 121, + 419, + 174, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 419, + 174, + 429 + ], + "spans": [ + { + "bbox": [ + 121, + 419, + 174, + 429 + ], + "type": "text", + "content": "Feedback:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 233, + 458, + 376, + 470 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 458, + 376, + 470 + ], + "spans": [ + { + "bbox": [ + 233, + 458, + 376, + 470 + ], + "type": "text", + "content": "Figure 18: Prompt for Prometheus." + } + ] + } + ], + "index": 21, + "type": "text" + }, + { + "bbox": [ + 121, + 487, + 489, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 487, + 489, + 528 + ], + "spans": [ + { + "bbox": [ + 121, + 487, + 489, + 528 + ], + "type": "text", + "content": "You are a diligent and precise assistant tasked with evaluating the correctness of responses. You will receive a question, an output sentence, and the correct answer. Your task is to determine if the output sentence accurately answers the question based on the provided correct answer. 
Respond with either [Correct] or [Incorrect]." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 121, + 537, + 206, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 537, + 206, + 546 + ], + "spans": [ + { + "bbox": [ + 121, + 537, + 206, + 546 + ], + "type": "text", + "content": "Special considerations:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 121, + 547, + 489, + 637 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 121, + 547, + 489, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 547, + 489, + 577 + ], + "spans": [ + { + "bbox": [ + 121, + 547, + 489, + 577 + ], + "type": "text", + "content": "1. **Multiple Answers**: If the output contains multiple answers, evaluate whether later answers modify or correct earlier ones. In such cases, compare the final answer with the correct answer. If the final answer is unclear or incorrect, respond with [Incorrect]." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 121, + 577, + 489, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 577, + 489, + 597 + ], + "spans": [ + { + "bbox": [ + 121, + 577, + 489, + 597 + ], + "type": "text", + "content": "2. **Mathematical Problems**: If the formats differ but the answers are mathematically equivalent, respond with [Correct]." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 121, + 597, + 489, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 597, + 489, + 616 + ], + "spans": [ + { + "bbox": [ + 121, + 597, + 489, + 616 + ], + "type": "text", + "content": "3. **Explicit Options**: If the question provides explicit candidate answers, the output will be considered correct if it clearly indicates the correct option's code or the correct option's content." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 121, + 616, + 489, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 616, + 489, + 637 + ], + "spans": [ + { + "bbox": [ + 121, + 616, + 489, + 637 + ], + "type": "text", + "content": "4. **No Explicit Options**: If the question does not provide explicit options, the output must align with the correct answer in content and meaning to be considered [Correct]." + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 646, + 222, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 646, + 222, + 657 + ], + "spans": [ + { + "bbox": [ + 121, + 646, + 222, + 657 + ], + "type": "text", + "content": "Question: \"\"{question}\"\"" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 121, + 657, + 240, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 657, + 240, + 666 + ], + "spans": [ + { + "bbox": [ + 121, + 657, + 240, + 666 + ], + "type": "text", + "content": "Output sentence: \"\"{output}\"" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 121, + 666, + 216, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 666, + 216, + 677 + ], + "spans": [ + { + "bbox": [ + 121, + 666, + 216, + 677 + ], + "type": "text", + "content": "Correct answer: {answer}" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 121, + 677, + 165, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 677, + 165, + 687 + ], + "spans": [ + { + "bbox": [ + 121, + 677, + 165, + 687 + ], + "type": "text", + "content": "Judgement:" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 242, + 707, + 367, + 719 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 707, + 367, + 719 + ], + "spans": [ + { + "bbox": [ + 242, + 707, + 367, + 719 + ], + "type": "text", + "content": "Figure 19: Prompt for xVerify." 
+ } + ] + } + ], + "index": 33, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 149 + ], + "type": "text", + "content": "All models were executed on GPUs with identical configurations. Specifically, Prometheus-8x7B-v2.0, JudgeLM-33B-v1.0, CompassJudger-1-32B, xVerify-27B-I, and xVerify-32B-I were deployed on two GPUs for inference, while the remaining models were deployed on a single GPU. From Table 21, it is evident that all xVerify models exhibit an overall average runtime within 100 seconds, whereas the overall average runtime for the other judge models exceeds 100 seconds. Moreover, for each question category, the models with the shortest evaluation times are the xVerify models. Thus, the xVerify models demonstrably surpass the other judge models in terms of evaluation efficiency." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "text", + "content": "Table 22 presents the evaluation costs incurred when employing GPT-4o as the judge, based on assessments of 200 randomly selected samples per question type, along with the overall expenditure. Apart from the prerequisite deployment overhead, the cost of invoking the xVerify models for evaluation is substantially lower than that of GPT-4o. 
Additionally, compared to GPT-4o, which relies on remote server deployment, the locally deployed xVerify models offer higher invocation efficiency. Taken together, these results underscore that the xVerify models outperform the other judge models in both usage cost and evaluation efficiency." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 106, + 281, + 504, + 512 + ], + "blocks": [ + { + "bbox": [ + 104, + 247, + 504, + 280 + ], + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 280 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 280 + ], + "type": "text", + "content": "Table 21: Running Time Comparison of xVerify Models and Other Judge Models (200 Samples per Question Type). The best performance in each column is shown in **bold**, and the second-best performance is underlined." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 281, + 504, + 512 + ], + "lines": [ + { + "bbox": [ + 106, + 281, + 504, + 512 + ], + "spans": [ + { + "bbox": [ + 106, + 281, + 504, + 512 + ], + "type": "table", + "html": "
Method TypeMethodMultiple Choice (s)Math (s)Short Answer (s)Classification (s)Avg (s)
Judge ModelPandaLM-7B-v1304.5076.2476.9765.79130.88
Auto-J-Bilingual-6B1,570.441,802.711,194.081,148.321,428.89
Auto-J-13B3,055.003,622.702,807.231,903.002,846.98
Prometheus-7B-v2.01,173.80947.71706.74696.34881.15
Prometheus-8x7B-v2.01,557.101,128.081,132.84750.511,142.13
JudgeLM-7B-v1.0551.88469.10394.57348.05440.90
JudgeLM-13B-v1.0777.73598.19564.25529.60617.44
JudgeLM-33B-v1.01,041.831,018.37789.80762.99903.25
CompassJudger-1-1.5B189.45244.08139.50110.95171.00
CompassJudger-1-7B163.96568.72450.2080.58315.87
CompassJudger-1-14B346.80571.66217.86196.18333.13
CompassJudger-1-32B147.53258.10133.59152.11172.83
xVerifyxVerify-0.5B-I38.9741.2539.1238.8739.55
xVerify-1B-I33.9136.6333.4433.4734.36
xVerify-1.5B-I43.0546.8742.1742.0843.54
xVerify-2B-I38.4473.1639.2937.3847.07
xVerify-3B-Ia38.5444.5437.1143.0240.80
xVerify-3B-Ib46.9353.58106.0647.8463.60
xVerify-7B-I68.2495.5050.6651.6766.52
xVerify-8B-I78.0661.5745.3446.8257.95
xVerify-9B-C131.0770.1651.6652.5776.37
xVerify-9B-I54.2069.9149.4151.0656.15
xVerify-14B-Ia59.18114.9155.5054.8071.10
xVerify-14B-Ib61.17145.19116.4357.5595.09
xVerify-27B-I85.2889.4158.9961.0073.67
xVerify-32B-I131.0598.9964.7467.4590.56
", + "image_path": "fc0db9d96b61c24a2bb9520c503fadc3531bfe8b2e6d23d36abedd249b486d06.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 127, + 544, + 484, + 580 + ], + "blocks": [ + { + "bbox": [ + 132, + 526, + 476, + 539 + ], + "lines": [ + { + "bbox": [ + 132, + 526, + 476, + 539 + ], + "spans": [ + { + "bbox": [ + 132, + 526, + 476, + 539 + ], + "type": "text", + "content": "Table 22: Total costs (in USD) of GPT-4o as Judge (200 Samples per Question Type)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 127, + 544, + 484, + 580 + ], + "lines": [ + { + "bbox": [ + 127, + 544, + 484, + 580 + ], + "spans": [ + { + "bbox": [ + 127, + 544, + 484, + 580 + ], + "type": "table", + "html": "
MethodMultiple Choice ($)Math ($)Short Answer ($)Classification ($)Total ($)
GPT-4o as Judge0.310.660.240.271.48
GPT-4o as Judge (CoT)0.551.000.420.482.45
", + "image_path": "022d0e835114f130f558611eb0eb1ecb4e306987c7a3bc23db29b6b363a9a7bb.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10483/9b7bb575-f36f-48cf-a562-8fb8239c8a45_content_list.json b/data/2025/2504_10xxx/2504.10483/9b7bb575-f36f-48cf-a562-8fb8239c8a45_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..4b30b8c507f4d67bba969a988118d9c9b473c3a8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/9b7bb575-f36f-48cf-a562-8fb8239c8a45_content_list.json @@ -0,0 +1,2150 @@ +[ + { + "type": "text", + "text": "REPA-E: Unlocking VAE for End-to-End Tuning with Latent Diffusion Transformers", + "text_level": 1, + "bbox": [ + 140, + 128, + 854, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xingjian Leng $^{\\alpha \\star}$ Jaskirat Singh $^{\\alpha \\star}$ Yunzhong Hou $^{\\alpha}$ Zhenchang Xing $^{\\beta}$ Saining Xie $^{\\chi}$ Liang Zheng $^{\\alpha}$", + "bbox": [ + 192, + 202, + 802, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{\\alpha}$ Australian National University $\\beta$ Data61 CSIRO $\\chi$ New York University", + "bbox": [ + 192, + 244, + 802, + 263 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{xingjian.length\\*, jaskirat.singh\\*, yunzhong.hou, liang.zheng}@anu.edu.au", + "bbox": [ + 187, + 267, + 808, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "zhenchang.xing@data61.csiro.au saining.xie@nyu.edu", + 
"bbox": [ + 269, + 285, + 723, + 299 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/6e3d867ad461854a2b4897fe73b4d029c7a1b0fe01e5b82ec27c5b3c3fbac347.jpg", + "image_caption": [ + "a) Traditional LDM Training", + "Figure 1. Can we unlock VAE for end-to-end tuning with latent-diffusion models? - Traditional deep learning wisdom dictates that end-to-end training is often preferable when possible. However, latent diffusion models usually only update the generator network while keeping the variational auto-encoder (VAE) fixed (a). This is because directly using the diffusion loss to update the VAE (b) causes the latent space to collapse. We show that while direct diffusion-loss is ineffective, end-to-end training can be unlocked through the representation-alignment (REPA) loss - allowing both encoder and diffusion model to be jointly tuned during the training process (c). Notably, this allows for significantly accelerated training; speeding up training by over $17\\times$ and $45\\times$ over REPA and vanilla training recipes, respectively (d)." + ], + "image_footnote": [], + "bbox": [ + 98, + 321, + 225, + 500 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a9541f9988db720abb731ca660a0d104cdb172ec8d219145d7c412f88112e0a4.jpg", + "image_caption": [ + "b) Naive End-to-End LDM Training", + "c) REPA-E (Ours)" + ], + "image_footnote": [], + "bbox": [ + 263, + 321, + 612, + 500 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3d0d5d48be88986c8ac48ee99088103e4823610d4c41d445054fcca80dcf74c9.jpg", + "image_caption": [ + "d) Training Steps vs. 
FID-50K Improved Generation Performance" + ], + "image_footnote": [], + "bbox": [ + 643, + 325, + 901, + 497 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 246, + 628, + 326, + 643 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper we tackle a fundamental question: \"Can we train latent diffusion models together with the variational auto-encoder (VAE) tokenizer in an end-to-end manner?\" Traditional deep-learning wisdom dictates that end-to-end training is often preferable when possible. However, for latent diffusion transformers, it is observed that end-to-end training both VAE and diffusion-model using standard diffusion-loss is ineffective, even causing a degradation in final performance. We show that while diffusion loss is ineffective, end-to-end training can be unlocked through the representation-alignment (REPA) loss - allowing both VAE and diffusion model to be jointly tuned during the training process. Despite its simplicity, the proposed training recipe (REPA-E) shows remarkable performance; speeding up diffusion model training by over $17 \\times$ and $45 \\times$ over REPA and", + "bbox": [ + 86, + 661, + 485, + 888 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "vanilla training recipes, respectively. Interestingly, we observe that end-to-end tuning with REPA-E also improves the VAE itself; leading to improved latent space structure and downstream generation performance. In terms of final performance, our approach sets a new state-of-the-art; achieving FID of 1.12 and 1.69 with and without classifier-free guidance on ImageNet $256 \\times 256$ . Code is available at https://end2end-diffusion.github.io.", + "bbox": [ + 511, + 630, + 906, + 752 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 513, + 780, + 643, + 796 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "End-to-end training has propelled the field forward for the past decade. It is understood that incorporating more components into end-to-end training can lead to increased performance, as evidenced by the evolution of the RCNN family [14, 15, 38]. With that said, training schemes of latent diffusion models (LDMs) [40] remain two-stage: first, the variational auto-encoder (VAE) [22] is trained with the re", + "bbox": [ + 511, + 805, + 906, + 912 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10483v3 [cs.CV] 22 Oct 2025", + "bbox": [ + 22, + 276, + 57, + 717 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal Contribution.", + "bbox": [ + 114, + 898, + 232, + 911 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/58248ecea17233f3151ed2c3fa2b6660f014b9cc50ef2d343f2fafeecf08c27b.jpg", + "image_caption": [ + "(a) PCA Analysis on VAE Latent Space Structure" + ], + "image_footnote": [], + "bbox": [ + 122, + 78, + 460, + 277 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6ff1982f4ad41e9b7eb323cd44e02190df89a9a8943279c88efeddf464d1cd62.jpg", + "image_caption": [ + "(b) Performance Improvements with REPA-E (400K Steps)", + "Figure 2. End-to-End Training Automatically Improves VAE Latent-Space Structure. (a) Following [24], we visualize latent space structure from different VAEs before and after end-to-end training using principal component analysis (PCA) that projects them to three channels colored by RGB. We consider SD-VAE [40], and IN-VAE $^1$ , a $16 \\times$ downsampling, 32-channel VAE trained on ImageNet [6]. For SD-VAE we find that latent representations have high-frequency noise. 
Applying end-to-end tuning helps learning a more smooth and less noisy latent representation. Interestingly to the contrast, the latent space for IN-VAE is over-smoothed (e.g., row-2). Applying end-to-end tuning automatically helps learn a more detailed latent space structure to best support final generation performance. (b) Jointly tuning both VAE and latent diffusion model (LDM) significantly improves final generation performance (gFID) across different VAE architectures." + ], + "image_footnote": [], + "bbox": [ + 545, + 79, + 870, + 277 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "construction loss; then, the diffusion model is trained with the diffusion loss while keeping the VAE fixed (see Fig. 1a).", + "bbox": [ + 88, + 419, + 480, + 450 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The above two-stage division of the LDM training process, though popular, leads to a challenging optimization task: \"How to best optimize the representation from first stage (VAE) for optimal performance while training the second stage (diffusion model)?\" While recent works study the interplay between the performance of the two stages [24, 44], they are often limited to empirical analysis, which may vary depending on the architecture and training setting for both the VAE and the diffusion model. For instance, in a concurrent work [44] show that the latent space of popular autoencoders e.g., SD-VAE [40] suffer from high-frequency noise / components. However, as seen in Fig. 2 & 6, while the same holds for some VAEs (e.g. SD-VAE), it might not be true for other VAE architectures — which instead might suffer from an over-smoothed latent space (Fig. 
2, 6).", + "bbox": [ + 88, + 452, + 482, + 679 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we therefore ask a fundamental question: \"Can we jointly tune both VAE and LDM in an end-to-end manner to best optimize final generation performance?\" Technically, it is straightforward to do end-to-end LDM training by simply back-propagating the diffusion loss to the VAE tokenizer. However, experiments (§3) reveal that this naive approach for end-to-end training is ineffective. The diffusion loss encourages learning a simpler latent space structure which is easier for denoising objective (refer §3.1), but leads to reduced generation performance (Fig. 1d).", + "bbox": [ + 89, + 683, + 482, + 833 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address this, we propose REPA-E; an end-to-end training recipe using representation alignment loss [54]. We show that while the diffusion loss is ineffective, end-to-end tuning can be unlocked through the recently proposed representation-alignment (REPA) loss - allowing both VAE", + "bbox": [ + 89, + 835, + 483, + 912 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "and diffusion model to be jointly tuned during training process. Through extensive evaluations, we demonstrate that end-to-end tuning with REPA-E offers several advantages; End-to-End Training Leads to Accelerated Generation Performance; speeding up diffusion training by over $17 \\times$ and $45 \\times$ over REPA and vanilla training recipes (Fig. 1d). Furthermore, it also helps significantly improve the final generation performance. For instance as seen in Fig. 
1d, we find that when using the popular SiT-XL [30] architecture, REPA-E reaches an FID of 4.07 within 400K steps, significantly boosting final performance over even REPA which only only reaches a final FID for 5.9 after 4M steps [54].", + "bbox": [ + 511, + 419, + 906, + 601 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "End-to-End Training improves VAE latent-space structure. As seen in Fig. 2 and §4.4, we find that jointly tuning the VAE and latent diffusion model during training, automatically improves the latent space structure across different VAE architectures. For instance, for SD-VAE [40], it is observed that the original latent space suffers from high-frequency noise (Fig. 2). Applying end-to-end tuning helps learn a more smooth latent space representation. In contrast, the latent space for IN-VAE1 is over-smoothed. Applying REPA-E automatically helps learn more detailed latent space structure to best support generation performance. End-to-End Tuning Improves VAE Performance. Finally, we find that once tuned using REPA-E, the end-to-end tuned VAE can be used as a drop-in replacement for their original counterparts (e.g. SD-VAE) showing improved generation performance across diverse training settings and model architectures (refer §4.4).", + "bbox": [ + 511, + 601, + 908, + 857 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To summarize, key contributions of this paper are: 1) We propose REPA-E; an end-to-end training recipe for jointly", + "bbox": [ + 511, + 857, + 906, + 888 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "${}^{1}$ trained onImagenet at ${f16d32}$ using official training code from [40].", + "bbox": [ + 529, + 898, + 897, + 911 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "tuning both VAE and LDM using representation alignment loss (§3). 
2) We find that despite its simplicity, REPA-E leads to accelerated generation performance; speeding up diffusion training by over $17 \\times$ and $45 \\times$ over REPA and vanilla training recipes, respectively (§4.2). 3) We show that end-to-end training is able to adaptively improve the latent space structure across diverse VAE architectures. 4) We demonstrate that once tuned using REPA-E, the end-to-end tuned VAE can be used as a drop-in replacement for their original counterparts (e.g., SD-VAE), exhibiting significantly better downstream generation performance (§4.4).", + "bbox": [ + 89, + 90, + 480, + 257 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 89, + 271, + 232, + 286 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Tokenizer or autoencoders (AE) [3] use either the variational objective [22] for continuous tokenization or a vector quantization objective [9, 48] for discrete tokenization [8-10, 16, 21, 22, 36, 40, 48, 53, 55]. However, current tokenizers are primarily trained for minimizing the reconstruction error, which maybe not provide the optimal latent space for generation [24]. We show that improved latent space structure is achieved by end-to-end training of LDMs.", + "bbox": [ + 89, + 297, + 482, + 417 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Latent diffusion models leverage pre-trained image tokenizers to compress images into a lower-dimensional latent space to simplify the generative task [5, 10, 10, 11, 26, 32, 36, 40, 43, 47]. Despite their effectiveness, existing tokenizers and diffusion models are trained separately [10, 36, 40]. In this paper, we explore jointly optimizing tokenizers and diffusion models to achieve faster convergence and improved generation performance (Sec. 
4).", + "bbox": [ + 89, + 417, + 482, + 539 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Representation alignment for generative learning has recently shown huge promise for improving the training speed and performance of diffusion models [35, 50, 54]. We find that instead of applying the REPA loss separately over LDM [54] or VAE [50], significantly better performance and training speed can be achieved through E2E training.", + "bbox": [ + 89, + 539, + 482, + 630 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "End-to-End Diffusion. LSGM [47] explores joint training with score-based generative models, which uses a variational lower bound objective with an entropy term for preventing latent space collapse while backpropagating the diffusion loss. We empirically find that while this helps prevent latent space collapse, REPA-E shows significantly faster convergence during E2E training (refer App. B).", + "bbox": [ + 89, + 630, + 482, + 736 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. REPA-E: Unlocking VAE for Joint Training", + "text_level": 1, + "bbox": [ + 89, + 750, + 480, + 767 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Overview. Given a variational autoencoder (VAE) and latent diffusion transformer (e.g., SiT [30]), we wish to jointly tune the VAE latent representation and diffusion model features in an end-to-end manner to best optimize the final generation performance. To this end, we first make three key insights in §3.1: 1) Naive end-to-end tuning - directly back-propagating the diffusion loss to the VAE is ineffective. The diffusion loss encourages learning a more simpler latent space structure (Fig. 3a) which is easier for min", + "bbox": [ + 89, + 775, + 482, + 912 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "imizing the denoising objective [40], but degrades the final generation performance. 
We next analyze the recently proposed representation-alignment loss [54] showing that; 2) Higher representation-alignment score [54] correlates with improved generation performance (Fig. 3b). This offers an alternate path for improving final generation performance using representation-alignment score as a proxy. 3) The maximum achievable alignment score with vanilla-REPA is bottlenecked by the VAE latent space features. We further show that backpropagating the REPA loss to the VAE during training can help address this limitation, significantly improving final representation-alignment score (Fig. 3c).", + "bbox": [ + 511, + 90, + 903, + 272 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given the above insights, we finally propose REPA-E (§3.2); an end-to-end tuning recipe for both VAE and LDM features. Our key idea is simple: instead of directly using diffusion loss for end-to-end tuning, we can use the representation alignment score as a proxy for the final generation performance. This motivates our final approach, where instead of the diffusion loss, we propose to perform end-to-end training using the representation-alignment loss. The end-to-end training with REPA loss helps better improve the final representation-alignment score (Fig. 3b), which in turn leads to improved final generation performance (§3.1).", + "bbox": [ + 511, + 273, + 903, + 439 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Motivating End-to-End Training with REPA", + "text_level": 1, + "bbox": [ + 511, + 450, + 890, + 465 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Naive End-to-End Tuning is Ineffective. We first analyze the naive approach for end-to-end tuning; directly backpropagating the diffusion loss to the VAE tokenizer. As shown in Fig. 3a, we observe that directly backpropagating the diffusion loss encourages learning a more simpler latent space structure with lower variance along the spatial dimensions (Tab. 10). 
The simpler latent-space structure poses an easier problem for the denoising objective [40], but leads to reduced generation performance (Fig. 1). Consider an intermediate latent $z_{t} = \\alpha_{t}z_{\\mathrm{VAE}} + \\sigma_{t}\\epsilon_{orig}$ for any timestep $t$ . The denoising objective [34] mainly aims to predict $\\epsilon_{pred}$ ; estimating the originally added noise $\\epsilon_{orig}$ from VAE features $z_{\\mathrm{VAE}}$ and timestep $t$ . As the variance along the spatial dimensions for VAE latent $z_{\\mathrm{VAE}}$ goes down, the denoising objective effectively reduces to predicting a bias term for recovering back the originally added noise $\\epsilon_{orig}$ . Thus, backpropagation the diffusion loss effectively hacks the latent space structure to create an easier denoising problem, but leads to a reduced generation performance (Fig. 1). Higher Representation Alignment Correlates with Better Generation Performance. Similar to the findings of [54], we also measure representation alignment using CKNNA scores [19] across different model sizes and training iterations. As seen in Fig. 3b, we observe that higher representation alignment during the training process leads to improved generation performance. 
This suggests an alternate path for improving generation performance by using the representation alignment objective instead of the diffusion loss for end-to-end training (refer §3.2).", + "bbox": [ + 511, + 473, + 903, + 912 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/aed0aebb7aa7f26a90775c3d741b04e5ee30a74fde9c4116c849efd5d8dfb03f.jpg", + "image_caption": [ + "RGB", + "Image" + ], + "image_footnote": [], + "bbox": [ + 93, + 95, + 156, + 145 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/678247a76edebf75eef996545bf9f71dcd7acf3febf0a69f2a6874afe513f021.jpg", + "image_caption": [ + "SDVAE", + "w/o E2E" + ], + "image_footnote": [], + "bbox": [ + 158, + 95, + 222, + 145 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/737ee93bcfc4726d031fc27ae03451e0ae5dc24d68c85911901ac5255cefdf3a.jpg", + "image_caption": [ + "E2E with", + "REPA Loss" + ], + "image_footnote": [], + "bbox": [ + 223, + 95, + 289, + 145 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/ecca1ad26ed6740024c2b424367c42553d5a44b65aea95da9b3a313377175628.jpg", + "image_caption": [ + "E2E with", + "Diff, Loss" + ], + "image_footnote": [], + "bbox": [ + 290, + 95, + 354, + 145 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/3a884ef9a7201faa6ce809763a9801e6306cc17f1db558b545d4255c6db7f8fb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 94, + 145, + 156, + 195 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/3b2ef12788c4a2120146d9f5d497dc212500338f2b6db3d99890fedb7710f1b6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 145, + 222, + 195 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/6d94cb8882bfecafaab1bbb1acfc06fe066cc5249188812276c3541185a1e8ca.jpg", + "image_caption": [], + "image_footnote": [], + 
"bbox": [ + 223, + 145, + 289, + 195 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/2062b30ebd77b104482e0ace1af3c36cfdc092c6d22e5d9f827fdc42a5cdf62f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 290, + 145, + 354, + 195 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/026a2509afc429c08ebf20dbfc03dde3f7f9882b223f893a1155f70e028d28db.jpg", + "image_caption": [ + "(a) PCA Visualization of Latent Spaces" + ], + "image_footnote": [], + "bbox": [ + 94, + 195, + 156, + 244 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/d1e9c9c23709cb744c68d3c891fec414dcd095ab25592add4488c6349b77a306.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 195, + 222, + 244 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/ae8c4a8f75e3bc72d7182c5a9a929d9d0aa2a4238ec623bfea7b8c6ef63012e0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 223, + 195, + 289, + 244 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/96122cd52837ce36699d9fc4d592f85bbb3c75af36fa0f639b4dd5f6896b3775.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 290, + 195, + 354, + 244 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4084e4f8a694c979c3b8b8e5e18a0d0a6961457d400ae16e873a14a36c04d873.jpg", + "image_caption": [ + "(b) Correlation: gFID & CKNNA Score" + ], + "image_footnote": [], + "bbox": [ + 369, + 82, + 576, + 244 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/3af1810a7f98b4e4da924a364be17b210eb220e9b0b6c4f9bb76ce63a6aa8af7.jpg", + "image_caption": [ + "(c) E2E tuning with REPA improves CKNNA Score", + "Figure 3. Motivating End-to-End Tuning using Representation Alignment (REPA) Loss. We make three key insights: 1) Naive end-to-end (E2E) tuning using diffusion loss is ineffective. 
The diffusion encourages learning a more simpler latent space structure (a) which is easier for denoising objective (refer §3.1) but degrades final generation performance (Fig. 1). We next analyze the recently proposed representation alignment (REPA) loss [54] showing: 2) Higher representation alignment (CKNNA) leads to better generation performance. This suggests an alternate path for improving performance by using representation-alignment (CKNNA) as proxy for generation performance. 3) The maximum achievable CKNNA score with vanilla-REPA is bottlenecked by the VAE features (c) saturating around $\\sim 0.42$ . Back-propagating the REPA-loss to the VAE helps address this limitation and improve the final CKNNA score. Given the above insights: we propose REPA-E ( $\\S 3.2$ ) for end-to-end LDM training. The key idea is simple: instead of using the diffusion loss, we perform end-to-end training using the REPA loss. The end-to-end training with REPA loss helps improve the final representation-alignment (CKNNA), which in turn leads to improved generation performance ( $\\S 4$ )." + ], + "image_footnote": [], + "bbox": [ + 581, + 94, + 895, + 247 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Representation Alignment is Bottlenecked by the VAE Features. Fig. 3c shows that while the naive application of REPA loss [54] leads to improved representation-alignment (CKNNA) score, the maximum achievable alignment score is still bottlenecked the VAE features saturating around a value of 0.4 (maximum value of 1). Furthermore, we find that backpropagating the representation-alignment loss to the VAE helps address this limitation; allowing end-to-end optimization of the VAE features to best support representation-alignment objective [54].", + "bbox": [ + 89, + 419, + 482, + 570 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. 
End-to-End Training with REPA", + "text_level": 1, + "bbox": [ + 89, + 584, + 377, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given the above insights, we next propose REPA-E (§3.2); an end-to-end tuning recipe for jointly training both VAE and LDM features. Instead of directly using diffusion loss, we propose to perform end-to-end training using the representation-alignment loss. The end-to-end training with REPA loss helps better improve the final representation-alignment score (Fig. 3c), which in turn leads to improved final generation performance (refer §4.2). We next discuss key details for implementation of REPA-E for training.", + "bbox": [ + 89, + 608, + 482, + 743 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Batch-Norm Layer for VAE Latent Normalization. To enable end-to-end training, we first introduce a batchnorm layer between the VAE and latent diffusion model (Fig. 1). Typical LDM training involves normalizing the VAE features using precomputed latent statistics (e.g., std $= 1 / 0.1825$ for SD-VAE [40]). This helps normalize the VAE latent outputs to zero mean and unit variance for more efficient training for the diffusion model. However, with end-to-end training the statistics need to be recomputed whenever the VAE model is updated - which is expensive. To address this, we propose the use of a batch", + "bbox": [ + 89, + 746, + 482, + 912 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "norm layer [20] which uses the exponential moving average (EMA) mean and variance as a surrogate for dataset-level statistics. The batch-norm layer thus acts as a differentiable normalization operator without the need for recomputing dataset level statistics after each optimization step.", + "bbox": [ + 511, + 419, + 903, + 494 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "End-to-End Representation-Alignment Loss. 
We next enable end-to-end training, by using the REPA loss [54] for updating the parameters for both VAE and LDM during training. Formally, let $\\mathcal{V}_{\\phi}$ represent the VAE, $\\mathcal{D}_{\\theta}$ be the diffusion model, $f$ be the fixed pretrained perceptual model (e.g., DINO-v2 [33]) for REPA [54] and $\\mathbf{x}$ be a clean image. Also similar to REPA, consider $h_{\\omega}(\\mathbf{h}_t)$ be the projection of diffusion transformer output $\\mathbf{h}_t$ through a trainable projection layer $h_{\\omega}$ . We then perform end-to-end training by applying the REPA loss over both LDM and VAE as,", + "bbox": [ + 511, + 494, + 905, + 647 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {R E P A}} (\\theta , \\phi , \\omega) = - \\mathbb {E} _ {\\mathbf {x}, \\epsilon , t} \\left[ \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\operatorname {s i m} \\left(\\mathbf {y} ^ {[ n ]}, h _ {\\omega} \\left(\\mathbf {h} _ {t} ^ {[ n ]}\\right)\\right) \\right],\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 652, + 901, + 694 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{y} = f(\\mathbf{x})$ is the output of the pretrained perceptual model (e.g., DINO-v2 [33]), $N$ is number of patches, $\\mathrm{sim}(< ., . >)$ computes the patch-wise cosine similarities between pretrained representation $\\mathbf{y}$ from perceptual model (e.g., DINO-v2) and diffusion transformer hidden state $\\mathbf{h}_t$ .", + "bbox": [ + 511, + 700, + 903, + 775 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Diffusion Loss with Stop-Gradient. As discussed in Fig. 3a and §3.1, backpropagating the diffusion loss to the VAE causes a degradation of latent-space structure. 
To avoid this, we introduce a simple stopgrad operation which limits the application of diffusion loss $\\mathcal{L}_{\\mathrm{DIFF}}$ to only the parameters $\\theta$ of the latent diffusion model $\\mathcal{D}_{\\theta}$ .", + "bbox": [ + 511, + 776, + 905, + 866 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "VAE Regularization Losses. Finally, we introduce regularization losses $\\mathcal{L}_{\\mathrm{REG}}$ for VAE $\\nu_{\\phi}$ , to ensure that the end-to-end training process does not impact the reconstruction", + "bbox": [ + 511, + 867, + 903, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/b0f6d7040a425954eeab73ec3015199d815cfcfb9afc1f1b22d8e551e28f6b7f.jpg", + "image_caption": [ + "Figure 4. End-to-End Tuning (REPA-E) Improves Visual Scaling. We observe that REPA-E produces higher-quality images at $400\\mathrm{K}$ steps compared with the vanilla-REPA and generates more structurally meaningful images even in the early stages of training. Results for both methods are sampled using the same seed, noise and class label. We use a classifier-free guidance scale of 4.0 during sampling." + ], + "image_footnote": [], + "bbox": [ + 98, + 77, + 906, + 378 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "performance (rFID) of the original VAE. In particular, following [1], we use three losses, 1) Reconstruction Losses $(\\mathcal{L}_{\\mathrm{MSE}},\\mathcal{L}_{\\mathrm{LPIPS}})$ , 2) GAN Loss $(\\mathcal{L}_{\\mathrm{GAN}})$ , 3) KL divergence loss $(\\mathcal{L}_{\\mathrm{KL}})$ as regularization loss $\\mathcal{L}_{\\mathrm{REG}}$ for the VAE $\\nu_{\\phi}$ .", + "bbox": [ + 89, + 440, + 482, + 500 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Overall Training. 
The overall training is then performed in an end-to-end manner using the following loss,", + "bbox": [ + 89, + 501, + 482, + 532 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} (\\theta , \\phi , \\omega) = \\mathcal {L} _ {\\mathrm {D I F F}} (\\theta) + \\lambda \\mathcal {L} _ {\\mathrm {R E P A}} (\\theta , \\phi , \\omega) + \\eta \\mathcal {L} _ {\\mathrm {R E G}} (\\phi),\n$$\n", + "text_format": "latex", + "bbox": [ + 93, + 542, + 478, + 560 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\theta, \\phi, \\omega$ refer to the parameters for the LDM, VAE and trainable REPA projection layer [54], respectively. Further implementation details are provided in §4.1 and Appendix.", + "bbox": [ + 89, + 570, + 483, + 617 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 89, + 628, + 223, + 646 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We next validate the performance of REPA-E and the effect of proposed components through extensive evaluation. In particular, we investigate three key research questions:", + "bbox": [ + 89, + 654, + 482, + 699 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Can REPA-E significantly improve generation performance and training speed? (Sec. 4.2, Tab. 1, Fig. 1, 4)", + "2. Does REPA-E generalize across variations in training settings including model-scale, architecture, encoder model for REPA etc.? (Sec. 4.3, Tab. 2, 3, 4, 5, 6, 7)", + "3. Analyze the impact of end-to-end tuning (REPA-E) on VAE latent-space structure and downstream generation performance. (please refer Sec. 4.4, Fig. 6, Tab. 8, 9)" + ], + "bbox": [ + 89, + 699, + 483, + 821 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Setup", + "text_level": 1, + "bbox": [ + 89, + 829, + 171, + 845 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation Details. 
We follow the same setup as in SiT [30] and REPA [54] unless otherwise specified. All training is conducted on the ImageNet [6] training split. We adopt the same data preprocessing protocol as", + "bbox": [ + 89, + 851, + 483, + 912 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "in ADM [7], where original images are center-cropped and resized to $256 \\times 256$ resolution. We experiment with publicly available VAEs, including SD-VAE (f8d4) [40], VA-VAE (f16d32) [40], and our own f16d32 VAE trained on ImageNet, referred to as IN-VAE. Depending on the VAE downsampling rate, we adopt SiT-XL/1 and SiT-XL/2 for $4 \\times$ and $16 \\times$ downsampling rates, respectively, where 1 and 2 denote the patch sizes in the transformer embedding layer. We disable affine transformations in the BN [20] layer between the VAE and SiT, relying solely on the running mean and standard deviation. The VAE regularization loss combines multiple objectives and is defined as: $\\mathcal{L}_{\\mathrm{REG}} = \\mathcal{L}_{\\mathrm{KL}} + \\mathcal{L}_{\\mathrm{MSE}} + \\mathcal{L}_{\\mathrm{LPIPS}} + \\mathcal{L}_{\\mathrm{GAN}}$ . For alignment loss, we use DINOv2 [33] as external visual features and apply alignment to the eighth layer of the SiT model. Empirically, we set the alignment loss coefficient to $\\lambda_{\\mathrm{REPA}_g} = 0.5$ for updating SiT and $\\lambda_{\\mathrm{REPA}_v} = 1.5$ for VAE. For optimization, we use AdamW [23, 29] with a constant learning rate of $1 \\times 10^{-4}$ , and a global batch size of 256. During training, we apply gradient clipping and exponential moving average (EMA) to the generative model for stable optimization. All experiments are conducted on 8 NVIDIA H100 GPUs.", + "bbox": [ + 511, + 440, + 906, + 772 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation. For image generation evaluation, we strictly follow the ADM setup [7]. 
We report generation quality using Fréchet inception distance (gFID) [17], structural FID (sFID) [31], inception score (IS) [42], precision (Prec.) and recall (Rec.) [25], measured on 50K generated images. For sampling, we follow the approach in SiT [30] and REPA [54], using the SDE Euler-Maruyama sampler with 250 steps. In terms of VAE benchmark, we measure the reconstruction FID (rFID) on 50K images from the Im", + "bbox": [ + 511, + 775, + 908, + 912 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/c0ccebd2a37cbf0f831d5a90307f3271837c5bd97b301ad920db8dddccafe7f4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 112, + 88, + 238, + 186 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/591dcd624ebe552ccf98780d13bf06ce9ec7c6a6452497a963591a1120d0e0bc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 241, + 88, + 367, + 186 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/98d5346a3f5d469ff855020e5e08533d39161900a58da1f5b6d2bc7be4a914b7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 370, + 88, + 496, + 186 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/fd12988f16d053f4c42208367c590281d1f1ccceeec126036badaef31a99e14a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 88, + 625, + 186 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ffb775a5a42001303337822551611cf8813cb9ca3412dea2c0caff6206c92060.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 629, + 88, + 754, + 186 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9b918e4e0af32f5170c59f334fc411b4925cc05d2d86c173ad1e1740e0c6ca05.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 756, + 88, + 883, + 186 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": 
"images/732c2cd3f89282ebd0a31c3d19337f2d8bfc6478b1fdfeb81e104bd6b2f3b41d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 112, + 186, + 238, + 285 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2578ec0c923a1161d476acd025b22fe4449057186fb959695c72d26185dc88cf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 241, + 186, + 367, + 285 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b04f9900754a787b98e10d234ab8b15d8eca65c470b50220f75f889f9a11f468.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 370, + 186, + 496, + 285 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/938d6a0dae447cad9ea39e63107549156e33218b9ca6a3565b0607e1df7efb1c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 186, + 625, + 285 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/70e94662b383073ac040caac999f97c380f6c9dac51ab4b893a0a6a3f105693c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 629, + 186, + 754, + 285 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/be5f24596e485bced0e6830cd32cfd126187b4d5cb069e03094ce1e4cef97c4d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 756, + 186, + 883, + 285 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/23a834c33b92caf6f54237aa86afb54c1667eec855ce1c31c92f32512830e58c.jpg", + "image_caption": [ + "Figure 5. Qualitative Results on Imagenet $256 \\times 256$ using E2E-VAE and SiT-XL. We use a classifier-free guidance scale $\\alpha_{\\mathrm{cfg}} = 4.0$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 112, + 287, + 238, + 383 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/10a3b80800ca4cbbe8a8383df426c38aec1dc6cebe1d8c22ac60df810291d921.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 241, + 286, + 367, + 383 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e6f4ef56876b31e928da107740a6f9255035f413280dccaa59cbef9c0972b9c6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 370, + 286, + 496, + 383 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d01256bacff19b6a3cc55c94487b9b303cf3b4501268a5a38f314e22bf356833.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 286, + 625, + 383 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/04d75970ad292add98f1113716832632b5e8e141ef16b0ddb372ed0b172873c5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 629, + 286, + 754, + 383 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3424b3de6794f2e2560bbb40c232b2c343909d6fe3029b8fca2169b1dde57471.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 756, + 286, + 883, + 383 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/ebd09afb994a91ca2cde8433cbeccb25b078c9df362e91163c7c0a9f19d05664.jpg", + "table_caption": [], + "table_footnote": [ + "Table 1. REPA-E for Accelerated Generation Performance. End-to-End training with REPA-E achieves significantly better performance (lower gFID) while using fewer epochs. Notably, REPA-E with only 80 epochs surpasses vanilla REPA using $10 \\times$ epochs. * indicates that VAE is updated during end-to-end training. All results are w/o classifier-free guidance on ImageNet 256 × 256. Additional system-level comparisons with classifier-free guidance and state-of-the-art results are provided in Tab. 9." + ], + "table_body": "
MethodTokenizerEpochsgFID↓sFID↓IS↑
Without End-to-End Tuning
MaskDiT [56]SD-VAE16005.6910.34177.9
DiT [34]14009.626.85121.5
SiT [30]14008.616.32131.7
FasterDiT [51]4007.915.45131.3
REPA [54]SD-VAE2019.406.0667.4
4011.105.05100.4
807.905.06122.6
8005.905.73157.8
With End-to-End Tuning (Ours)
REPA-ESD-VAE*2012.835.0488.8
407.174.39123.7
804.074.60161.8
", + "bbox": [ + 93, + 435, + 480, + 643 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ageNet [6] validation set at a resolution of $256 \\times 256$ .", + "bbox": [ + 89, + 768, + 446, + 782 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Impact on Training Performance and Speed", + "text_level": 1, + "bbox": [ + 89, + 796, + 464, + 811 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first analyze the impact of end-to-end tuning using REPA-E (Sec. 3.2) for improving generation performance and speed when training latent-diffusion transformers.", + "bbox": [ + 89, + 819, + 482, + 864 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Quantitative Evaluation. We compare REPA-E against various latent diffusion model (LDM) baselines in Tab. 1. We evaluate models of similar sizes ( $\\sim$ 675M parameters)", + "bbox": [ + 89, + 866, + 483, + 912 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/9a812b0e81a6130477702ee4c4a9eb833570d2120becb8138fb64bcbdeb3bbb7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Diff. ModelgFID↓sFID↓IS↑Prec.↑Rec.↑
SiT-B (130M)49.57.0027.50.460.59
+REPA-E (Ours)34.86.3139.10.570.59
SiT-L (458M)24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
SiT-XL (675M)19.46.0667.40.640.61
+REPA-E (Ours)12.85.0488.80.710.58
", + "bbox": [ + 517, + 435, + 898, + 547 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Variation in Model-Scale. We find that REPA-E brings substantial performance improvements across all model-scales. All baselines are reported using vanilla-REPA [54] for training.", + "bbox": [ + 511, + 551, + 905, + 595 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "on ImageNet $256 \\times 256$ generation task. All results are reported without classifier-free guidance [18] using popular SiT-XL [30] model for training. We make two observations; 1) End-to-End tuning leads to faster training: consistently improving generation FID (gFID) from $19.40 \\rightarrow 12.83$ (20 epochs), $11.10 \\rightarrow 7.17$ (40 epochs), and $7.90 \\rightarrow 4.07$ (80 epochs), even when comparing with REPA [54]. 2) End-to-End training leads to better final performance: REPA-E at 80 epochs surpasses FasterDiT [51] ( $gFID = 7.91$ ) trained for 400 epochs and even MaskDiT [56], DiT [34], and SiT [30] which are trained over 1400 epochs. For instance, REPA-E reaches an FID of 4.07 within 400K steps, significantly boosting final performance over even REPA which only reaches a final FID for 5.9 after 4M steps [54].", + "bbox": [ + 511, + 607, + 906, + 819 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Qualitative Evaluation. We provide qualitative comparisons between REPA [54] and REPA-E in Fig. 4. We generate images from the same noise and label using checkpoints at $50\\mathrm{K}$ , $100\\mathrm{K}$ , and $400\\mathrm{K}$ training iterations, respectively. As seen in Fig. 
4, we observe that REPA-E demonstrates superior image generation quality compared to the", + "bbox": [ + 511, + 821, + 905, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/70ad31221b2f68af8bce71e05adc8b7df7169655277a3c20d0ba2992aa8a31cc.jpg", + "image_caption": [ + "(a) PCA Visualization of Latent Space Structure [24]" + ], + "image_footnote": [], + "bbox": [ + 91, + 74, + 415, + 281 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/77c564390a6c95f2c38a5d95ea756c20cec3d4210e716525a358c5d26a1aea66.jpg", + "image_caption": [ + "(b) Impact of End-to-End Tuning for Automatically Improving Latent Space Structure", + "Figure 6. End-to-End Training Improves Latent Space Structure. (a) We observe that the latent space of pretrained VAEs can suffer either high noise components (e.g., SDXL-VAE, SD-VAE [40]), or, be over-smoothed and lack details (e.g., VA-VAE [50]). (b) The use of end-to-end tuning ( $\\S 3.2$ ) automatically helps improve the latent space structure in a model-agnostic manner across different VAE architectures. For instance, similar to findings of concurrent work [44], we observe that SD-VAE suffers from high noise components in the latent space. Applying end-to-end training automatically helps adjust the latent space to reduce noise. In contrast, other VAEs such as recently proposed VA-VAE [50] suffer from an over-smoothed latent space. The use of end-to-end tuning with REPA-E automatically helps learn a more detailed latent-space structure to best support generation performance." + ], + "image_footnote": [], + "bbox": [ + 446, + 74, + 903, + 280 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/4a22383a153c124e4af46b489760f62ae9e0676c1257c25773a561a405c1f621.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Target Repr.gFID↓sFID↓IS↑Prec.↑Rec.↑
I-JEPA-H [2]23.05.8160.30.620.60
+REPA-E (Ours)16.55.1873.60.680.60
CLIP-L [37]29.25.9846.40.590.61
+REPA-E (Ours)23.46.4457.10.620.60
DINOv2-B [33]24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
DINOv2-L [33]23.35.8959.90.610.60
+REPA-E (Ours)16.05.5977.70.680.58
", + "bbox": [ + 94, + 412, + 478, + 555 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/4a1821b1fab0a4d9974dccd6b9994ef489d78001f259fa1cdc2878fe9134dcfe.jpg", + "table_caption": [ + "Table 3. Variation in Representation Encoder. REPA-E yields consistent performance improvements across different choices for the representation-encoder used for representation-alignment [54]. All baselines are reported using vanilla-REPA [54] for training." + ], + "table_footnote": [], + "table_body": "
AutoencodergFID↓sFID↓IS↑Prec.↑Rec.↑
SD-VAE [40]24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
IN-VAE (f16d32)22.75.4756.00.620.62
+REPA-E (Ours)12.75.5784.00.690.62
VA-VAE [50]12.86.4783.80.710.58
+REPA-E (Ours)11.15.3188.80.720.61
", + "bbox": [ + 94, + 623, + 478, + 736 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "REPA baseline, while also generating more structurally meaningful images during early stages of training process.", + "bbox": [ + 89, + 773, + 482, + 805 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Generalization and Scalability of REPA-E", + "text_level": 1, + "bbox": [ + 89, + 814, + 449, + 830 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We next analyze the generalization of the proposed approach to variation in training settings including model-size, tokenizer architecture, representation encoder, alignment depth [54] etc. Unless otherwise specified, all analysis and ablations use SiT-L [30] as the generative model,", + "bbox": [ + 89, + 835, + 483, + 912 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/adaae38fc3e6ec577cf064cdecf30feb67e4483e73d3134e2edd66601a68e38b.jpg", + "table_caption": [ + "Table 4. Variation in VAE Architecture. All baselines are reported using vanilla-REPA [54] for training." + ], + "table_footnote": [], + "table_body": "
Aln. DepthgFID↓sFID↓IS↑Prec.↑Rec.↑
6th layer23.05.7259.20.620.60
+REPA-E (Ours)16.46.6474.30.670.59
8th layer24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
10th layer23.75.9156.90.620.60
+REPA-E (Ours)16.25.2274.70.680.58
", + "bbox": [ + 517, + 412, + 898, + 525 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/5236958fea9f022969d3155f5426594fa733d4d77a23e4336c7f34abd68a2ea8.jpg", + "table_caption": [ + "Table 5. Variation in Alignment Depth. End-to-End tuning (REPA-E) gives consistent performance imrpoements over original REPA [54] across varying alignment-depths." + ], + "table_footnote": [], + "table_body": "
ComponentgFID↓sFID↓IS↑Prec.↑Rec.↑
w/o stopgrad444.1460.31.490.000.00
w/o batch-norm18.15.3272.40.670.59
w/o LGAN19.26.4768.20.640.58
REPA-E (Ours)16.35.6975.00.680.60
", + "bbox": [ + 514, + 578, + 901, + 654 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 6. Ablation Study on Role of Different Components.", + "bbox": [ + 526, + 656, + 890, + 670 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "SD-VAE as the VAE, and DINOv2-B [33] as the pretrained vision model for REPA loss [54]. Default REPA alignment-depth of 8 is used. We train each variant for 100K iterations and report results without classifier-free guidance [18]. All baseline numbers are reported using vanilla REPA and compared with end-to-end training using REPA-E.", + "bbox": [ + 511, + 683, + 906, + 773 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Impact of Model Size. Tab. 2 compares SiT-B, SiT-L, and SiT-XL to evaluate the effect of model size. We make two key observations. First, across all configurations, REPA-E consistently improves performance over the REPA baseline. Specifically, it reduces gFID from $49.5 \\rightarrow 34.8$ for SiT-B, $24.1 \\rightarrow 16.3$ for SiT-L, and $19.4 \\rightarrow 12.8$ for SiT-XL, demonstrating the effectiveness. Second, surprisingly the percentage gains in gFID achieved with REPA-E (over REPA) improve with increasing model size. For in-", + "bbox": [ + 511, + 775, + 908, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/5afd84a905078fcc6a268b92ea0c21c222abc451154a92f902aa8edef8767ab1.jpg", + "table_caption": [], + "table_footnote": [ + "Table 7. End-to-End Training from Scratch. We find that while initializing the VAE with pretrained weights (SD-VAE [40]) helps slightly improve performance, REPA-E can be used to train both VAE and LDM from scratch in an end-to-end manner; still achieving significantly superior performance over REPA which requires a separate stage for training VAE in addition to LDM training." + ], + "table_body": "
MethodgFID↓sFID↓IS↑Prec.↑Rec.↑
100K Iterations (20 Epochs)
REPA [54]19.406.0667.40.640.61
REPA-E (scratch)14.127.8783.50.700.59
REPA-E (VAE init.)12.835.0488.80.710.58
200K Iterations (40 Epochs)
REPA [54]11.105.05100.40.690.64
REPA-E (scratch)7.546.17120.40.740.61
REPA-E (VAE init.)7.174.39123.70.740.62
400K Iterations (80 Epochs)
REPA [54]7.905.06122.60.700.65
REPA-E (scratch)4.344.44154.30.750.63
REPA-E (VAE init.)4.074.60161.80.760.62
", + "bbox": [ + 91, + 88, + 480, + 291 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "stance, for SiT-B model REPA-E leads to a $29.6\\%$ improvement in gFID over REPA. Surprisingly even more gains are achieved for bigger models improving gFID by $32.3\\%$ and $34.0\\%$ for SiT-L and SiT-XL models respectively. This trend highlights the scalability of REPA-E; larger models achieve better percentage gains over vanilla-REPA.", + "bbox": [ + 89, + 396, + 480, + 488 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Variation in Representation Encoder. We report results across different perception model encoders (CLIP-L, I-JEPA-H, DINOv2-B, and DINOv2-L) Tab. 3. We observe that REPA-E gives consistent performance improvements over REPA, across different choices of the perceptual encoder model. In particular, with DINOv2-B and DINOv2-L, REPA-E significantly reduces gFID from $24.1 \\rightarrow 16.3$ and from $23.3 \\rightarrow 16.0$ , respectively.", + "bbox": [ + 89, + 493, + 480, + 613 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Variation in VAE. Tab. 4 evaluates the impact of different VAEs on REPA-E performance. In particular, we report results using three different VAEs 1) SD-VAE [1], 2) VA-VAE [50] and 3) IN-VAE (a $16\\times$ downsampling, 32-channel VAE trained on ImageNet [6] using official training code from [40]). Across all variations, REPA-E consistently improves performance over the REPA baseline. REPA-E reduces gFID from $24.1\\rightarrow 16.3$ from $22.7\\rightarrow 12.7$ and $12.8\\rightarrow 11.1$ for SD-VAE, IN-VAE and VA-VAE, respectively. The results demonstrate that REPA-E robustly improves generative quality across diverse variations in architecture, pretraining dataset and training setting of the VAE.", + "bbox": [ + 89, + 619, + 482, + 800 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Variation in Alignment Depth. Tab. 
5 investigates the effect of applying the alignment loss at different layers the diffusion model. We observe that REPA-E consistently enhances generation quality over the REPA baseline across variation in choice of alignment depth; with gFID improving from $23.0 \\rightarrow 16.4$ (6th layer), $24.1 \\rightarrow 16.3$ (8th layer), and $23.7 \\rightarrow 16.2$ (10th layer).", + "bbox": [ + 89, + 806, + 482, + 912 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/3e1b51168691e30ece2a426a5f9344b7b39df1ab9f2ca7cae0527e56816b6325.jpg", + "table_caption": [], + "table_footnote": [ + "Table 8. Impact of End-to-End Tuning on VAE Performance. We find that once tuned using REPA-E, the finetuned VAEs can be used as a drop-in replacement for their original counterparts offering significantly accelerated generation performance. We fix all the VAEs and only train the diffusion models (with and w/o REPA). E2E-VAE is obtained from REPA-E fine-tuning (VA-VAE + SiT-XL). All results are reported at 80 epochs (400K iterations)." + ], + "table_body": "
VAEDiffusion modelREPAgFID-50K
SD-VAE [40]DiT-XL [34]19.82
VA-VAE [50]DiT-XL [34]6.74
E2E-VAE (Ours)DiT-XL [34]6.75
SD-VAE [40]SiT-XL [30]17.20
VA-VAE [50]SiT-XL [30]5.93
E2E-VAE (Ours)SiT-XL [30]5.26
SD-VAE [40]DiT-XL [34]12.29
VA-VAE [50]DiT-XL [34]4.71
E2E-VAE (Ours)DiT-XL [34]4.20
SD-VAE [40]SiT-XL [30]7.90
VA-VAE [50]SiT-XL [30]4.88
E2E-VAE (Ours)SiT-XL [30]3.46
", + "bbox": [ + 516, + 88, + 903, + 279 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation on Design Components. We also perform ablation studies analyzing the importance of each component discussed in Sec. 3.2. Results are shown in Tab. 6. We observe that each component plays a key role in the final performance for REPA-E. In particular, we observe that the stop-grad operation on the diffusion loss helps prevent degradation of the latent-space structure. Similarly, the use of batch norm is useful adaptively normalizing the latent-statistics and helps improve the gFID from $18.09 \\rightarrow 16.3$ . Similarly, the regularization losses play a key role in maintaining the reconstruction performance of the finetuned VAE, thereby improving the gFID from $19.07 \\rightarrow 16.3$ .", + "bbox": [ + 511, + 388, + 903, + 570 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "End-to-End Training from Scratch. We next analyze the impact of VAE initialization on end-to-end training. As shown in Tab. 7, we find that while initializing the VAE from pretrained weights helps slightly improve performance, REPA-E can be used to train both VAE and LDM from scratch still achieving superior performance over REPA, which technically requires a separate stage for VAE training in addition to LDM training. For instance, while REPA achieves a FID of 5.90 after 4M iterations, REPA-E while training entirely from scratch (for both VAE and LDM) achieves much faster and better generation FID of 4.34 within just 400K iterations.", + "bbox": [ + 511, + 571, + 903, + 752 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. Impact of End-to-End Tuning on VAE", + "text_level": 1, + "bbox": [ + 511, + 766, + 841, + 782 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We next analyze the impact of end-to-end tuning on the VAE. In particular, we first show that end-to-end tuning improves the latent-space structure (Fig. 6). 
We next show that once tuned using REPA-E, the finetuned VAEs can be used as a drop-in replacement for their original counterparts offering significantly improved generation performance.", + "bbox": [ + 511, + 789, + 903, + 880 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "End-to-End Training improves Latent Space Structure. Results are shown in Fig. 6. Following [24], we visu", + "bbox": [ + 511, + 882, + 903, + 912 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/3bd08f1e01a8a2b5250db10979a050e6a8557330b32383b5d72fc672ca983201.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TokenizerMethodTraining Epochs#paramsrFID↓Generation w/o CFGGeneration w/ CFG
gFID↓sFID↓IS↑Prec.↑Rec.↑gFID↓sFID↓IS↑Prec.↑Rec.↑
AutoRegressive (AR)
MaskGiTMaskGIT [4]555227M2.286.18-182.10.800.51-----
VQGANLlamaGen [45]3003.1B0.599.388.24112.90.690.672.185.97263.30.810.58
VQVAEVAR [46]3502.0B------1.80-365.40.830.57
LFQ tokenizersMagViT-v2 [52]1080307M1.503.65-200.5--1.78-319.4--
LDMMAR [27]800945M0.532.35-227.80.790.621.55-303.70.810.62
Latent Diffusion Models (LDM)
SD-VAE [40]MaskDiT [56]1600675M0.615.6910.34177.90.740.602.285.67276.60.800.61
DiT [34]1400675M9.626.85121.50.670.672.274.60278.20.830.57
SiT [30]1400675M8.616.32131.70.680.672.064.50270.30.820.59
FasterDiT [51]400675M7.915.45131.30.670.692.034.63264.00.810.60
MDT [12]1300675M6.235.23143.00.710.651.794.57283.00.810.61
MDTv2 [13]1080675M-----1.584.52314.70.790.65
Representation Alignment Methods
VA-VAE [50]LightningDiT [50]80675M0.284.29---------
800675M2.054.37207.70.770.661.254.15295.30.800.65
SD-VAEREPA [54]80675M0.617.905.06122.60.700.65-----
800675M5.845.79158.70.700.681.284.68305.70.790.64
E2E-VAE (Ours)REPA80675M0.283.464.17159.80.770.631.674.12266.30.800.63
800675M1.694.17219.30.770.671.124.09302.90.790.66
", + "bbox": [ + 89, + 87, + 906, + 380 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 9. System-Level Performance on ImageNet $256 \\times 256$ comparing our end-to-end tuned VAE (E2E-VAE) with other VAEs for traditional LDM training. Note that all representation alignment methods at 800 epochs are evaluated using a class-balanced sampling protocol, as detailed in App. C. We observe that in addition to improving VAE latent space structure (Fig. 6), end-to-end tuning significantly improves VAE downstream generation performance. Once tuned using REPA-E, the improved VAE can be used as drop-in replacement for their original counterparts for accelerated generation performance. Overall, our approach helps improve both LDM and VAE performance — achieving a new state-of-the-art FID of 1.12 and 0.28, respectively for LDM generation and VAE reconstruction performance.", + "bbox": [ + 89, + 388, + 906, + 474 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "alize latent space structure using principal component analysis (PCA) that projects them to three channels colored by RGB. We consider three different VAEs: 1) SD-VAE [40], 2) IN-VAE (a $16\\times$ downsampling, 32-channel VAE trained on ImageNet [6]). 3) VA-VAE from recent work from [50]. We observe that end-to-end tuning using REPA-E automatically improves the latent space structure of the original VAE. For instance, similar to findings of concurrent work [44], we observe that SD-VAE suffers from high noise components in the latent space. Applying end-to-end training automatically helps adjust the latent space to learn reduce noise. In contrast, other VAEs such as recently proposed VA-VAE [50] suffer from over-smother latent space. Application of E2E tuning automatically helps learn a more detailed latent-space to best support generation performance.", + "bbox": [ + 88, + 500, + 480, + 727 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "End-to-End Training Improves VAE Performance. 
We next evaluate the impact of end-to-end tuning on downstream generation performance of the VAE. To this end, we first use end-to-end tuning for finetuning the recently proposed VA-VAE [50]. We then use the resulting end-to-end finetuned-VAE (named E2E-VAE), and compare its downstream generation performance with current state-of-the-art VAEs; including SDVAE [40] and VA-VAE [50]. To do this, we conduct traditional latent diffusion model training (w/o REPA-E), where only the generator network is updated while keeping the VAE frozen. Tab. 8 shows the comparison of VAE downstream generation across diverse train", + "bbox": [ + 89, + 729, + 482, + 912 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ing settings. We observe that end-to-end tuned VAEs consistently outperform their original counterparts for downstream generation tasks across variations in LDM architecture and training settings. Interestingly, we observe that a VAE tuned using SiT-XL yields performance improvements even when using a different LDM architecture such as DiT-XL; thereby demonstrating the robustness of our approach.", + "bbox": [ + 511, + 500, + 906, + 608 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 513, + 660, + 633, + 678 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "\"Can we unlock VAE's for performing end-to-end training with latent diffusion transformers?\" Directly backpropagating diffusion loss to the VAE is ineffective and even degrages final performance. We show that while diffusion loss is ineffective, end-to-end training can be unlocked using REPA loss. Our end-to-end training recipe (REPA-E), significantly improves latent-space structure, shows remarkable performance; speeding up diffusion model training by over $17 \\times$ and $45 \\times$ over REPA and vanilla training recipes. 
Overall, our approach achieves a new state-of-the-art results with generation FID of 1.12 and 1.69 with and without use of classifier-free guidance. We hope that our work can help foster further research for enabling end-to-end training with latent diffusion transformers.", + "bbox": [ + 511, + 700, + 908, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 91, + 90, + 250, + 107 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We would like to extend our deepest appreciation to Zeyu Zhang, Qinyu Zhao, and Zhanhao Liang for insightful discussions. We would also like to thank all reviewers for their constructive feedback. This work was supported in part by the Australian Research Council under Discovery Project DP210102801 and Future Fellowship FT240100820. SX acknowledges support from the OpenPath AI Foundation, IITP grant funded by the Korean Government (MSIT) (No. RS-2024-00457882) and NSF Award IIS-2443404.", + "bbox": [ + 89, + 114, + 485, + 241 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 271, + 187, + 286 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Stability AI. Improved autoencoders ... https://huggingface.co/stabilityyai/sd-vae-ft-mse, n.d. Accessed: April 11, 2025. 5, 8", + "[2] Mahmoud Assran, Quentin Duval, Ishan Misra, Piotr Bojanowski, Pascal Vincent, Michael Rabbat, Yann LeCun, and Nicolas Ballas. Self-supervised learning from images with a joint-embedding predictive architecture. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15619–15629, 2023. 7", + "[3] Dana H Ballard. Modular learning in neural networks. 
In Proceedings of the sixth National conference on Artificial intelligence-Volume 1, pages 279-284, 1987. 3", + "[4] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 9", + "[5] Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. arXiv preprint arXiv:2310.00426, 2023. 3", + "[6] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 2, 5, 6, 8, 9, 13", + "[7] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 5", + "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 3", + "[9] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12873-12883, 2021. 3", + "[10] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik" + ], + "bbox": [ + 93, + 296, + 483, + 912 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. 
In *Forty-first International Conference on Machine Learning*, 2024. 3, 13", + "[11] Peng Gao, Le Zhuo, Ziyi Lin, Chris Liu, Junsong Chen, Ruoyi Du, Enze Xie, Xu Luo, Longtian Qiu, Yuhang Zhang, et al. Lumina-t2x: Transforming text into any modality, resolution, and duration via flow-based large diffusion transformers. arXiv preprint arXiv:2405.05945, 2024. 3", + "[12] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Masked diffusion transformer is a strong image synthesizer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23164-23173, 2023. 9", + "[13] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Mdtv2: Masked diffusion transformer is a strong image synthesizer. arXiv preprint arXiv:2303.14389, 2023. 9", + "[14] Ross Girshick. Fast r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 1440-1448, 2015. 1", + "[15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 580-587, 2014. 1", + "[16] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 3", + "[17] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 5", + "[18] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 6, 7", + "[19] Minyoung Huh, Brian Cheung, Tongzhou Wang, and Phillip Isola. The platonic representation hypothesis. In International Conference on Machine Learning, 2024. 3", + "[20] Sergey Ioffe and Christian Szegedy. 
Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International conference on machine learning, pages 448-456. pmlr, 2015. 4, 5", + "[21] Dongwon Kim, Ju He, Qihang Yu, Chenglin Yang, Xiaohui Shen, Suha Kwak, and Liang-Chieh Chen. Democratizing text-to-image masked generative models with compact text-aware one-dimensional tokens. arXiv preprint arXiv:2501.07730, 2025. 3", + "[22] Diederik P Kingma. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. 1, 3", + "[23] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5", + "[24] Theodoros Kouzelis, Ioannis Kakogeorgiou, Spyros Gidaris, and Nikos Komodakis. Eq-vae: Equivalence regularized latent space for improved generative image modeling. arXiv preprint arXiv:2502.09509, 2025. 2, 3, 7, 8" + ], + "bbox": [ + 516, + 92, + 903, + 912 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[25] Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in neural information processing systems, 32, 2019. 5", + "[26] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024.3", + "[27] Tianhong Li, Yonglong Tian, He Li, Mingyang Deng, and Kaiming He. Autoregressive image generation without vector quantization. Advances in Neural Information Processing Systems, 37:56424-56445, 2025. 9, 14", + "[28] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 13", + "[29] I Loshchilov. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 
5", + "[30] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable interpolant transformers. In European Conference on Computer Vision, pages 23-40. Springer, 2024. 2, 3, 5, 6, 7, 8, 9, 13", + "[31] Charlie Nash, Jacob Menick, Sander Dieleman, and Peter Battaglia. Generating images with sparse representations. In International Conference on Machine Learning, pages 7958-7968. PMLR, 2021. 5", + "[32] OpenAI. Sora. https://openai.com/sora, 2024.3", + "[33] Maxime Oquab, Timothee Darct, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. Transactions on Machine Learning Research Journal, pages 1-31, 2024. 4, 5, 7", + "[34] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 3, 6, 8, 9", + "[35] Pablo Pernias, Dominic Rampas, Mats Leon Richter, Christopher Pal, and Marc Aubreville. Würstchen: An efficient architecture for large-scale text-to-image diffusion models. In The Twelfth International Conference on Learning Representations, 2023. 3", + "[36] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. SDXL: Improving latent diffusion models for high-resolution image synthesis. In The Twelfth International Conference on Learning Representations, 2024. 3", + "[37] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 7", + "[38] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. 
Faster r-cnn: Towards real-time object detection with region proposal networks. IEEE transactions on pattern analysis and machine intelligence, 39(6):1137-1149, 2016. 1" + ], + "bbox": [ + 91, + 90, + 480, + 911 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[39] Sucheng Ren, Qihang Yu, Ju He, Xiaohui Shen, Alan Yuille, and Liang-Chieh Chen. Beyond next-token: Next-x prediction for autoregressive visual generation. arXiv preprint arXiv:2502.20388, 2025. 14", + "[40] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 1, 2, 3, 4, 5, 7, 8, 9, 13, 14", + "[41] Leonid I. Rudin, Stanley Osher, and Emad Fatemi. Nonlinear total variation based noise removal algorithms. Physica D: Nonlinear Phenomena, 60(1):259-268, 1992. 13", + "[42] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 5", + "[43] Jaskirat Singh, Stephen Gould, and Liang Zheng. High-fidelity guided image synthesis with latent diffusion models. arXiv preprint arXiv:2211.17084, 2022. 3", + "[44] Ivan Skorokhodov, Sharath Girish, Benran Hu, Willi Menapace, Yanyu Li, Rameen Abdal, Sergey Tulyakov, and Aliaksandr Siarohin. Improving the diffusability of autoencoders. arXiv preprint arXiv:2502.14831, 2025. 2, 7, 9", + "[45] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 9", + "[46] Keyu Tian, Yi Jiang, Zehuan Yuan, Bingyue Peng, and Liwei Wang. Visual autoregressive modeling: Scalable image generation via next-scale prediction. 
Advances in neural information processing systems, 37:84839-84865, 2025. 9", + "[47] Arash Vahdat, Karsten Kreis, and Jan Kautz. Score-based generative modeling in latent space. In Advances in Neural Information Processing Systems, pages 11287-11302. Curran Associates, Inc., 2021. 3, 13", + "[48] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 3", + "[49] Shuai Wang, Zhi Tian, Weilin Huang, and Limin Wang. Ddt: Decoupled diffusion transformer. arXiv preprint arXiv:2504.05741, 2025. 14", + "[50] Jingfeng Yao and Xinggang Wang. Reconstruction vs. generation: Taming optimization dilemma in latent diffusion models. arXiv preprint arXiv:2501.01423, 2025. 3, 7, 8, 9, 14", + "[51] Jingfeng Yao, Wang Cheng, Wenyu Liu, and Xinggang Wang. Fasteredit: Towards faster diffusion transformers training without architecture modification. arXiv preprint arXiv:2410.10356, 2024. 6, 9", + "[52] Lijun Yu, José Lezama, Nitesh B Gundavarapu, Luca Versari, Kihyuk Sohn, David Minnen, Yong Cheng, Vighnesh Birodkar, Agrim Gupta, Xiuye Gu, et al. Language model beats diffusion-tokenizer is key to visual generation. arXiv preprint arXiv:2310.05737, 2023. 9", + "[53] Qihang Yu, Mark Weber, Xueqing Deng, Xiaohui Shen, Daniel Cremers, and Liang-Chieh Chen. An image is worth 32 tokens for reconstruction and generation. Advances in Neural Information Processing Systems, 37:128940-128966, 2025. 3" + ], + "bbox": [ + 516, + 90, + 906, + 910 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[54] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. arXiv preprint arXiv:2410.06940, 2024. 
2, 3, 4, 5, 6, 7, 8, 9, 13", + "[55] Kaiwen Zha, Lijun Yu, Alireza Fathi, David A Ross, Cordelia Schmid, Dina Katabi, and Xiuye Gu. Language-guided image tokenization for generation. arXiv preprint arXiv:2412.05796, 2024. 3", + "[56] Hongkai Zheng, Weili Nie, Arash Vahdat, and Anima Anandkumar. Fast training of diffusion models with masked transformers. arXiv preprint arXiv:2306.09305, 2023. 6, 9" + ], + "bbox": [ + 91, + 90, + 480, + 260 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "REPA-E: Unlocking VAE for End-to-End Tuning with Latent Diffusion Transformers", + "text_level": 1, + "bbox": [ + 140, + 85, + 854, + 128 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Supplementary Material", + "bbox": [ + 380, + 141, + 614, + 162 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/bbdd03c0a00e3108ede5e16f8df6580adb828343ef7b9f14bbcf5aac90c1174e.jpg", + "table_caption": [], + "table_footnote": [ + "Table 10. Impact of Naive End-to-End Training with Diffusion Loss. We report total variation [41] and mean variance along each VAE latent channel for three training settings: 1) Standard LDM training (w/o end-to-end (E2E) tuning), 2) Naive E2E tuning with Diffusion loss, 3) E2E tuning with REPA loss [54]. All experiments use SDVAE for VAE initialization. We observe that using diffusion loss for end-to-end tuning encourages learning a simpler latent space with lower variance along the spatial dimensions (Fig. 3a). The simpler latent space is easier for denoising objective ( $\\S 3.1$ ), but degrades final generation performance (Fig. 1). All results are reported at 400K iterations with SiT-XL/2 [30] as LDM." + ], + "table_body": "
Training StrategySpatial VarianceTotal Variation
w/o E2E Tuning17.066627.35
E2E w/ REPA Loss18.025516.14
E2E w/ Diff. Loss0.0289.80
", + "bbox": [ + 94, + 176, + 478, + 250 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A. Impact of Diffusion Loss on Latent Space", + "text_level": 1, + "bbox": [ + 89, + 438, + 465, + 455 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We analyze the effect of naively using diffusion loss for end-to-end tuning, focusing on how it alters the latent space structure. All experiments here use SD-VAE for tokenizer initialization and SiT-XL/2 [30] as the latent diffusion model, trained for 400K iterations without classifier-free guidance. We report two metrics to quantify latent structure, 1) Spatial Variance, computed as the mean per-channel variance across spatial dimensions, and 2) Total Variation [41], which captures local spatial differences in the latent map.", + "bbox": [ + 89, + 463, + 482, + 613 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "As shown in Tab. 10 and Fig. 3, directly backpropagating the diffusion loss leads to reduced spatial variance, which creates an easier denoising problem by hacking the latent space but leads to reduced image generation performance. In contrast, end-to-end training with REPA-E not only leads to improved generation performance but also improves the latent space structure for the underlying VAE (Fig. 3, 6).", + "bbox": [ + 89, + 614, + 482, + 720 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B. Additional Analysis", + "text_level": 1, + "bbox": [ + 89, + 734, + 284, + 752 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/84102d205259392e556ce48c91dd5529c425e9dd4d94c96963ebdc7eb5e385a1.jpg", + "table_caption": [], + "table_footnote": [ + "Table 11. Comparison with LSGM Objective. REPA-E shows better generation performance and convergence speed." + ], + "table_body": "
MethodgFID ↓sFID ↓IS ↑Prec. ↑Rec. ↑
REPA + E2E-Diffusion444.1460.31.490.000.00
REPA + E2E-LSGM9.895.07107.50.720.61
REPA-E (Ours)4.074.60161.80.760.62
", + "bbox": [ + 91, + 773, + 482, + 844 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Comparison of End-to-End Training Objectives. We provide additional results comparing different objectives for", + "bbox": [ + 89, + 881, + 483, + 912 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/fb38cb45bdc201bce864203f0564b39c6f26db98d3284c2cba327117327406b1.jpg", + "table_caption": [], + "table_footnote": [ + "Table 12. Scaling REPA-E to Higher Resolution. System-level results on ImageNet-512 with $64 \\times 64$ latents using SiT-L at 100K steps without classifier-free guidance. We observe that REPA-E leads to significant performance improvements over vanilla-REPA [54] even at high resolutions." + ], + "table_body": "
MethodgFID↓sFID↓IS↑Prec.↑Rec.↑
REPA + SiT-L22.25.6858.30.740.60
REPA-E + SiT-L12.84.6090.60.790.61
", + "bbox": [ + 514, + 176, + 906, + 234 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/9c9161a3c487fa16b6c142d4839b37e74004fe3be79b441c86019cad3af5488a.jpg", + "table_caption": [], + "table_footnote": [ + "Table 13. Generalization to T2I Tasks. FID results on MSCOCO text-to-image generation using MMDiT + REPA. We find that end-to-end tuned VAEs (E2E-VAE) also generalizes to T2I tasks showing improved generation performance." + ], + "table_body": "
SamplerODE, NFE=50SDE, NFE=250
gFIDVA-VAE 5.43E2E-VAE 5.02VA-VAE 5.57E2E-VAE 4.97
", + "bbox": [ + 514, + 316, + 903, + 369 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "end-to-end training of VAE and LDM. Specifically, we evaluate: 1) naive E2E training by backpropagating diffusion loss to VAE encoder, 2) the LSGM entropy-regularized objective [47], 3) our proposed REPA-E. All methods are trained with SiT-XL for 400K steps under consistent settings.", + "bbox": [ + 511, + 450, + 905, + 541 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The LSGM objective prevents feature collapse by maximizing entropy of the latent space. However, as shown in Tab. 11, our REPA-E formulation yields better performance across all metrics at just $400\\mathrm{K}$ steps, with significantly faster convergence and stronger generation quality.", + "bbox": [ + 511, + 542, + 905, + 619 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Scaling REPA-E to Higher Latent Resolution. We conduct experiments on ImageNet-512 [6] to evaluate the performance of REPA-E under higher-resolution latent settings $(64 \\times 64)$ . We use SD-VAE [40] as the tokenizer and SiT-L as the diffusion model, trained for 100K steps and we report the performance without classifier-free guidance. As shown in Tab. 12, our approach yields significant improvements in generation quality compared to REPA.", + "bbox": [ + 511, + 621, + 905, + 743 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "MSCOCO Text-to-Image Generation with E2E-VAE. To further evaluate the utility of the tuned VAE beyond ImageNet, we assess its performance in a text-to-image generation (T2I) setting on MSCOCO [28]. Following REPA [54], we adopt MMDiT [10] as the diffusion backbone and apply REPA loss across all variants. All models are trained for 100K steps and evaluated using classifier-free guidance with $\\alpha_{\\mathrm{cfg}} = 2.0$ and EMA weights during inference. 
We report generation FID, and observe that replacing VA-VAE with our E2E-VAE consistently improves downstream text-to-image generation quality (Tab. 13).", + "bbox": [ + 511, + 744, + 906, + 912 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/f4c6791c5721f3d932ba8c2d44bb386e00abe01bc7069b0e7e53337554bc6967.jpg", + "table_caption": [], + "table_footnote": [ + "Table 14. VAE Reconstruction Evaluation on ImageNet-256. While REPA-E primarily improves the generative capability of the VAE (see Tab. 9), it also maintains competitive reconstruction quality across all metrics." + ], + "table_body": "
AutoencoderPSNR↑SSIM↑LPIPS↓rFID↓
SD-VAE [40]25.670.720.130.74
+REPA-E (Ours)24.840.710.150.53
IN-VAE (f16d32)27.400.800.090.26
+REPA-E (Ours)26.870.780.110.27
VA-VAE [50]26.320.760.110.28
+REPA-E (Ours)26.250.750.110.28
", + "bbox": [ + 91, + 88, + 480, + 213 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "C. Remarks on FID Evaluation", + "text_level": 1, + "bbox": [ + 94, + 297, + 354, + 314 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Throughout the paper, we follow the standard ImageNet conditional evaluation protocol, where 50,000 images are generated by randomly sampling class labels. Recent papers [27, 39, 49] have adopted class-balanced generation for evaluation, where 50 images per class are generated across the 1,000 categories. To our surprise, we found that using class-balanced sampling yields slightly better FID performance. Therefore, for the results in Tab. 9, we adopt this class-balanced sampling strategy. Accordingly, all representation alignment methods at the 800-epoch checkpoint in this table are computed under the balanced sampling protocol to ensure a fair and consistent comparison.", + "bbox": [ + 93, + 323, + 480, + 503 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 491, + 935, + 506, + 946 + ], + "page_idx": 13 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10483/9b7bb575-f36f-48cf-a562-8fb8239c8a45_model.json b/data/2025/2504_10xxx/2504.10483/9b7bb575-f36f-48cf-a562-8fb8239c8a45_model.json new file mode 100644 index 0000000000000000000000000000000000000000..8ffaac74e36a3bc0a8640d8aa8cc7666db9bc4cc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/9b7bb575-f36f-48cf-a562-8fb8239c8a45_model.json @@ -0,0 +1,2956 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.277, + 0.058, + 0.718 + ], + "angle": 270, + "content": "arXiv:2504.10483v3 [cs.CV] 22 Oct 2025" + }, + { + "type": "title", + "bbox": [ + 0.142, + 0.13, + 0.856, + 0.175 + ], + "angle": 0, + "content": "REPA-E: Unlocking VAE for End-to-End Tuning with Latent Diffusion Transformers" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.203, + 0.803, + 0.24 + ], + "angle": 0, + "content": "Xingjian 
Leng\\(^{\\alpha \\star}\\) Jaskirat Singh\\(^{\\alpha \\star}\\) Yunzhong Hou\\(^{\\alpha}\\) Zhenchang Xing\\(^{\\beta}\\) Saining Xie\\(^{\\chi}\\) Liang Zheng\\(^{\\alpha}\\)" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.246, + 0.803, + 0.265 + ], + "angle": 0, + "content": "\\(^{\\alpha}\\)Australian National University \\(\\beta\\)Data61 CSIRO \\(\\chi\\)New York University" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.268, + 0.81, + 0.283 + ], + "angle": 0, + "content": "{xingjian.length\\*, jaskirat.singh\\*, yunzhong.hou, liang.zheng}@anu.edu.au" + }, + { + "type": "text", + "bbox": [ + 0.271, + 0.286, + 0.724, + 0.3 + ], + "angle": 0, + "content": "zhenchang.xing@data61.csiro.au saining.xie@nyu.edu" + }, + { + "type": "image", + "bbox": [ + 0.099, + 0.322, + 0.226, + 0.5 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.133, + 0.501, + 0.222, + 0.528 + ], + "angle": 0, + "content": "a) Traditional LDM Training" + }, + { + "type": "image", + "bbox": [ + 0.264, + 0.322, + 0.613, + 0.5 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.285, + 0.501, + 0.408, + 0.528 + ], + "angle": 0, + "content": "b) Naive End-to-End LDM Training" + }, + { + "type": "image_caption", + "bbox": [ + 0.497, + 0.5, + 0.564, + 0.527 + ], + "angle": 0, + "content": "c) REPA-E (Ours)" + }, + { + "type": "image", + "bbox": [ + 0.645, + 0.327, + 0.903, + 0.498 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.671, + 0.501, + 0.876, + 0.527 + ], + "angle": 0, + "content": "d) Training Steps vs. FID-50K Improved Generation Performance" + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.533, + 0.907, + 0.618 + ], + "angle": 0, + "content": "Figure 1. Can we unlock VAE for end-to-end tuning with latent-diffusion models? - Traditional deep learning wisdom dictates that end-to-end training is often preferable when possible. 
However, latent diffusion models usually only update the generator network while keeping the variational auto-encoder (VAE) fixed (a). This is because directly using the diffusion loss to update the VAE (b) causes the latent space to collapse. We show that while direct diffusion-loss is ineffective, end-to-end training can be unlocked through the representation-alignment (REPA) loss - allowing both encoder and diffusion model to be jointly tuned during the training process (c). Notably, this allows for significantly accelerated training; speeding up training by over \\(17\\times\\) and \\(45\\times\\) over REPA and vanilla training recipes, respectively (d)." + }, + { + "type": "title", + "bbox": [ + 0.248, + 0.63, + 0.327, + 0.644 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.662, + 0.486, + 0.889 + ], + "angle": 0, + "content": "In this paper we tackle a fundamental question: \"Can we train latent diffusion models together with the variational auto-encoder (VAE) tokenizer in an end-to-end manner?\" Traditional deep-learning wisdom dictates that end-to-end training is often preferable when possible. However, for latent diffusion transformers, it is observed that end-to-end training both VAE and diffusion-model using standard diffusion-loss is ineffective, even causing a degradation in final performance. We show that while diffusion loss is ineffective, end-to-end training can be unlocked through the representation-alignment (REPA) loss - allowing both VAE and diffusion model to be jointly tuned during the training process. Despite its simplicity, the proposed training recipe (REPA-E) shows remarkable performance; speeding up diffusion model training by over \\(17 \\times\\) and \\(45 \\times\\) over REPA and" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.631, + 0.907, + 0.753 + ], + "angle": 0, + "content": "vanilla training recipes, respectively. 
Interestingly, we observe that end-to-end tuning with REPA-E also improves the VAE itself; leading to improved latent space structure and downstream generation performance. In terms of final performance, our approach sets a new state-of-the-art; achieving FID of 1.12 and 1.69 with and without classifier-free guidance on ImageNet \\(256 \\times 256\\). Code is available at https://end2end-diffusion.github.io." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.781, + 0.645, + 0.797 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.806, + 0.907, + 0.913 + ], + "angle": 0, + "content": "End-to-end training has propelled the field forward for the past decade. It is understood that incorporating more components into end-to-end training can lead to increased performance, as evidenced by the evolution of the RCNN family [14, 15, 38]. With that said, training schemes of latent diffusion models (LDMs) [40] remain two-stage: first, the variational auto-encoder (VAE) [22] is trained with the re" + }, + { + "type": "page_footnote", + "bbox": [ + 0.116, + 0.899, + 0.233, + 0.912 + ], + "angle": 0, + "content": "* Equal Contribution." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.079, + 0.462, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.16, + 0.282, + 0.424, + 0.295 + ], + "angle": 0, + "content": "(a) PCA Analysis on VAE Latent Space Structure" + }, + { + "type": "image", + "bbox": [ + 0.547, + 0.08, + 0.872, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.553, + 0.282, + 0.866, + 0.295 + ], + "angle": 0, + "content": "(b) Performance Improvements with REPA-E (400K Steps)" + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.302, + 0.907, + 0.4 + ], + "angle": 0, + "content": "Figure 2. 
End-to-End Training Automatically Improves VAE Latent-Space Structure. (a) Following [24], we visualize latent space structure from different VAEs before and after end-to-end training using principal component analysis (PCA) that projects them to three channels colored by RGB. We consider SD-VAE [40], and IN-VAE\\(^1\\), a \\(16 \\times\\) downsampling, 32-channel VAE trained on ImageNet [6]. For SD-VAE we find that latent representations have high-frequency noise. Applying end-to-end tuning helps learning a more smooth and less noisy latent representation. Interestingly to the contrast, the latent space for IN-VAE is over-smoothed (e.g., row-2). Applying end-to-end tuning automatically helps learn a more detailed latent space structure to best support final generation performance. (b) Jointly tuning both VAE and latent diffusion model (LDM) significantly improves final generation performance (gFID) across different VAE architectures." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.42, + 0.482, + 0.451 + ], + "angle": 0, + "content": "construction loss; then, the diffusion model is trained with the diffusion loss while keeping the VAE fixed (see Fig. 1a)." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.453, + 0.483, + 0.68 + ], + "angle": 0, + "content": "The above two-stage division of the LDM training process, though popular, leads to a challenging optimization task: \"How to best optimize the representation from first stage (VAE) for optimal performance while training the second stage (diffusion model)?\" While recent works study the interplay between the performance of the two stages [24, 44], they are often limited to empirical analysis, which may vary depending on the architecture and training setting for both the VAE and the diffusion model. For instance, in a concurrent work [44] show that the latent space of popular autoencoders e.g., SD-VAE [40] suffer from high-frequency noise / components. However, as seen in Fig. 
2 & 6, while the same holds for some VAEs (e.g. SD-VAE), it might not be true for other VAE architectures — which instead might suffer from an over-smoothed latent space (Fig. 2, 6)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.684, + 0.483, + 0.834 + ], + "angle": 0, + "content": "In this paper, we therefore ask a fundamental question: \"Can we jointly tune both VAE and LDM in an end-to-end manner to best optimize final generation performance?\" Technically, it is straightforward to do end-to-end LDM training by simply back-propagating the diffusion loss to the VAE tokenizer. However, experiments (§3) reveal that this naive approach for end-to-end training is ineffective. The diffusion loss encourages learning a simpler latent space structure which is easier for denoising objective (refer §3.1), but leads to reduced generation performance (Fig. 1d)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.837, + 0.484, + 0.913 + ], + "angle": 0, + "content": "To address this, we propose REPA-E; an end-to-end training recipe using representation alignment loss [54]. We show that while the diffusion loss is ineffective, end-to-end tuning can be unlocked through the recently proposed representation-alignment (REPA) loss - allowing both VAE" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.42, + 0.907, + 0.602 + ], + "angle": 0, + "content": "and diffusion model to be jointly tuned during training process. Through extensive evaluations, we demonstrate that end-to-end tuning with REPA-E offers several advantages; End-to-End Training Leads to Accelerated Generation Performance; speeding up diffusion training by over \\(17 \\times\\) and \\(45 \\times\\) over REPA and vanilla training recipes (Fig. 1d). Furthermore, it also helps significantly improve the final generation performance. For instance as seen in Fig. 
1d, we find that when using the popular SiT-XL [30] architecture, REPA-E reaches an FID of 4.07 within 400K steps, significantly boosting final performance over even REPA which only only reaches a final FID for 5.9 after 4M steps [54]." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.602, + 0.909, + 0.858 + ], + "angle": 0, + "content": "End-to-End Training improves VAE latent-space structure. As seen in Fig. 2 and §4.4, we find that jointly tuning the VAE and latent diffusion model during training, automatically improves the latent space structure across different VAE architectures. For instance, for SD-VAE [40], it is observed that the original latent space suffers from high-frequency noise (Fig. 2). Applying end-to-end tuning helps learn a more smooth latent space representation. In contrast, the latent space for IN-VAE1 is over-smoothed. Applying REPA-E automatically helps learn more detailed latent space structure to best support generation performance. End-to-End Tuning Improves VAE Performance. Finally, we find that once tuned using REPA-E, the end-to-end tuned VAE can be used as a drop-in replacement for their original counterparts (e.g. SD-VAE) showing improved generation performance across diverse training settings and model architectures (refer §4.4)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.858, + 0.908, + 0.89 + ], + "angle": 0, + "content": "To summarize, key contributions of this paper are: 1) We propose REPA-E; an end-to-end training recipe for jointly" + }, + { + "type": "page_footnote", + "bbox": [ + 0.53, + 0.899, + 0.898, + 0.912 + ], + "angle": 0, + "content": "\\( {}^{1} \\) trained onImagenet at \\( {f16d32} \\) using official training code from [40]." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.258 + ], + "angle": 0, + "content": "tuning both VAE and LDM using representation alignment loss (§3). 2) We find that despite its simplicity, REPA-E leads to accelerated generation performance; speeding up diffusion training by over \\(17 \\times\\) and \\(45 \\times\\) over REPA and vanilla training recipes, respectively (§4.2). 3) We show that end-to-end training is able to adaptively improve the latent space structure across diverse VAE architectures. 4) We demonstrate that once tuned using REPA-E, the end-to-end tuned VAE can be used as a drop-in replacement for their original counterparts (e.g., SD-VAE), exhibiting significantly better downstream generation performance (§4.4)." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.272, + 0.233, + 0.287 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.298, + 0.483, + 0.418 + ], + "angle": 0, + "content": "Tokenizer or autoencoders (AE) [3] use either the variational objective [22] for continuous tokenization or a vector quantization objective [9, 48] for discrete tokenization [8-10, 16, 21, 22, 36, 40, 48, 53, 55]. However, current tokenizers are primarily trained for minimizing the reconstruction error, which maybe not provide the optimal latent space for generation [24]. We show that improved latent space structure is achieved by end-to-end training of LDMs." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.419, + 0.483, + 0.54 + ], + "angle": 0, + "content": "Latent diffusion models leverage pre-trained image tokenizers to compress images into a lower-dimensional latent space to simplify the generative task [5, 10, 10, 11, 26, 32, 36, 40, 43, 47]. Despite their effectiveness, existing tokenizers and diffusion models are trained separately [10, 36, 40]. 
In this paper, we explore jointly optimizing tokenizers and diffusion models to achieve faster convergence and improved generation performance (Sec. 4)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.54, + 0.483, + 0.631 + ], + "angle": 0, + "content": "Representation alignment for generative learning has recently shown huge promise for improving the training speed and performance of diffusion models [35, 50, 54]. We find that instead of applying the REPA loss separately over LDM [54] or VAE [50], significantly better performance and training speed can be achieved through E2E training." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.631, + 0.483, + 0.737 + ], + "angle": 0, + "content": "End-to-End Diffusion. LSGM [47] explores joint training with score-based generative models, which uses a variational lower bound objective with an entropy term for preventing latent space collapse while backpropagating the diffusion loss. We empirically find that while this helps prevent latent space collapse, REPA-E shows significantly faster convergence during E2E training (refer App. B)." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.751, + 0.482, + 0.768 + ], + "angle": 0, + "content": "3. REPA-E: Unlocking VAE for Joint Training" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.776, + 0.483, + 0.913 + ], + "angle": 0, + "content": "Overview. Given a variational autoencoder (VAE) and latent diffusion transformer (e.g., SiT [30]), we wish to jointly tune the VAE latent representation and diffusion model features in an end-to-end manner to best optimize the final generation performance. To this end, we first make three key insights in §3.1: 1) Naive end-to-end tuning - directly back-propagating the diffusion loss to the VAE is ineffective. The diffusion loss encourages learning a more simpler latent space structure (Fig. 
3a) which is easier for min" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.273 + ], + "angle": 0, + "content": "imizing the denoising objective [40], but degrades the final generation performance. We next analyze the recently proposed representation-alignment loss [54] showing that; 2) Higher representation-alignment score [54] correlates with improved generation performance (Fig. 3b). This offers an alternate path for improving final generation performance using representation-alignment score as a proxy. 3) The maximum achievable alignment score with vanilla-REPA is bottlenecked by the VAE latent space features. We further show that backpropagating the REPA loss to the VAE during training can help address this limitation, significantly improving final representation-alignment score (Fig. 3c)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.274, + 0.905, + 0.44 + ], + "angle": 0, + "content": "Given the above insights, we finally propose REPA-E (§3.2); an end-to-end tuning recipe for both VAE and LDM features. Our key idea is simple: instead of directly using diffusion loss for end-to-end tuning, we can use the representation alignment score as a proxy for the final generation performance. This motivates our final approach, where instead of the diffusion loss, we propose to perform end-to-end training using the representation-alignment loss. The end-to-end training with REPA loss helps better improve the final representation-alignment score (Fig. 3b), which in turn leads to improved final generation performance (§3.1)." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.451, + 0.892, + 0.467 + ], + "angle": 0, + "content": "3.1. Motivating End-to-End Training with REPA" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.474, + 0.905, + 0.913 + ], + "angle": 0, + "content": "Naive End-to-End Tuning is Ineffective. 
We first analyze the naive approach for end-to-end tuning; directly backpropagating the diffusion loss to the VAE tokenizer. As shown in Fig. 3a, we observe that directly backpropagating the diffusion loss encourages learning a more simpler latent space structure with lower variance along the spatial dimensions (Tab. 10). The simpler latent-space structure poses an easier problem for the denoising objective [40], but leads to reduced generation performance (Fig. 1). Consider an intermediate latent \\( z_{t} = \\alpha_{t}z_{\\mathrm{VAE}} + \\sigma_{t}\\epsilon_{orig} \\) for any timestep \\( t \\). The denoising objective [34] mainly aims to predict \\( \\epsilon_{pred} \\); estimating the originally added noise \\( \\epsilon_{orig} \\) from VAE features \\( z_{\\mathrm{VAE}} \\) and timestep \\( t \\). As the variance along the spatial dimensions for VAE latent \\( z_{\\mathrm{VAE}} \\) goes down, the denoising objective effectively reduces to predicting a bias term for recovering back the originally added noise \\( \\epsilon_{orig} \\). Thus, backpropagation the diffusion loss effectively hacks the latent space structure to create an easier denoising problem, but leads to a reduced generation performance (Fig. 1). Higher Representation Alignment Correlates with Better Generation Performance. Similar to the findings of [54], we also measure representation alignment using CKNNA scores [19] across different model sizes and training iterations. As seen in Fig. 3b, we observe that higher representation alignment during the training process leads to improved generation performance. This suggests an alternate path for improving generation performance by using the representation alignment objective instead of the diffusion loss for end-to-end training (refer §3.2)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.109, + 0.075, + 0.14, + 0.086 + ], + "angle": 0, + "content": "RGB" + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.087, + 0.141, + 0.096 + ], + "angle": 0, + "content": "Image" + }, + { + "type": "image_caption", + "bbox": [ + 0.167, + 0.076, + 0.215, + 0.086 + ], + "angle": 0, + "content": "SDVAE" + }, + { + "type": "image_caption", + "bbox": [ + 0.167, + 0.087, + 0.215, + 0.096 + ], + "angle": 0, + "content": "w/o E2E" + }, + { + "type": "image_caption", + "bbox": [ + 0.231, + 0.076, + 0.282, + 0.086 + ], + "angle": 0, + "content": "E2E with" + }, + { + "type": "image_caption", + "bbox": [ + 0.227, + 0.087, + 0.287, + 0.096 + ], + "angle": 0, + "content": "REPA Loss" + }, + { + "type": "image_caption", + "bbox": [ + 0.296, + 0.076, + 0.348, + 0.086 + ], + "angle": 0, + "content": "E2E with" + }, + { + "type": "image_caption", + "bbox": [ + 0.296, + 0.087, + 0.347, + 0.096 + ], + "angle": 0, + "content": "Diff, Loss" + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.097, + 0.158, + 0.146 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.097, + 0.223, + 0.146 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.097, + 0.29, + 0.146 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.292, + 0.097, + 0.355, + 0.146 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.146, + 0.158, + 0.196 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.146, + 0.223, + 0.196 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.146, + 0.29, + 0.196 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.292, + 0.146, + 0.355, + 0.196 + ], + "angle": 0, + 
"content": null + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.196, + 0.158, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.196, + 0.223, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.196, + 0.29, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.292, + 0.196, + 0.355, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.12, + 0.248, + 0.328, + 0.261 + ], + "angle": 0, + "content": "(a) PCA Visualization of Latent Spaces" + }, + { + "type": "image", + "bbox": [ + 0.37, + 0.083, + 0.578, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.37, + 0.248, + 0.58, + 0.261 + ], + "angle": 0, + "content": "(b) Correlation: gFID & CKNNA Score" + }, + { + "type": "image", + "bbox": [ + 0.582, + 0.095, + 0.897, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.613, + 0.248, + 0.887, + 0.261 + ], + "angle": 0, + "content": "(c) E2E tuning with REPA improves CKNNA Score" + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.268, + 0.907, + 0.407 + ], + "angle": 0, + "content": "Figure 3. Motivating End-to-End Tuning using Representation Alignment (REPA) Loss. We make three key insights: 1) Naive end-to-end (E2E) tuning using diffusion loss is ineffective. The diffusion encourages learning a more simpler latent space structure (a) which is easier for denoising objective (refer §3.1) but degrades final generation performance (Fig. 1). We next analyze the recently proposed representation alignment (REPA) loss [54] showing: 2) Higher representation alignment (CKNNA) leads to better generation performance. This suggests an alternate path for improving performance by using representation-alignment (CKNNA) as proxy for generation performance. 
3) The maximum achievable CKNNA score with vanilla-REPA is bottlenecked by the VAE features (c) saturating around \\(\\sim 0.42\\). Back-propagating the REPA-loss to the VAE helps address this limitation and improve the final CKNNA score. Given the above insights: we propose REPA-E (\\(\\S 3.2\\)) for end-to-end LDM training. The key idea is simple: instead of using the diffusion loss, we perform end-to-end training using the REPA loss. The end-to-end training with REPA loss helps improve the final representation-alignment (CKNNA), which in turn leads to improved generation performance (\\(\\S 4\\))." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.42, + 0.483, + 0.571 + ], + "angle": 0, + "content": "Representation Alignment is Bottlenecked by the VAE Features. Fig. 3c shows that while the naive application of REPA loss [54] leads to improved representation-alignment (CKNNA) score, the maximum achievable alignment score is still bottlenecked by the VAE features saturating around a value of 0.4 (maximum value of 1). Furthermore, we find that backpropagating the representation-alignment loss to the VAE helps address this limitation; allowing end-to-end optimization of the VAE features to best support representation-alignment objective [54]." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.585, + 0.379, + 0.601 + ], + "angle": 0, + "content": "3.2. End-to-End Training with REPA" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.609, + 0.483, + 0.744 + ], + "angle": 0, + "content": "Given the above insights, we next propose REPA-E (§3.2); an end-to-end tuning recipe for jointly training both VAE and LDM features. Instead of directly using diffusion loss, we propose to perform end-to-end training using the representation-alignment loss. The end-to-end training with REPA loss helps better improve the final representation-alignment score (Fig. 3c), which in turn leads to improved final generation performance (refer §4.2). 
We next discuss key details for implementation of REPA-E for training." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.747, + 0.483, + 0.913 + ], + "angle": 0, + "content": "Batch-Norm Layer for VAE Latent Normalization. To enable end-to-end training, we first introduce a batchnorm layer between the VAE and latent diffusion model (Fig. 1). Typical LDM training involves normalizing the VAE features using precomputed latent statistics (e.g., std \\(= 1 / 0.1825\\) for SD-VAE [40]). This helps normalize the VAE latent outputs to zero mean and unit variance for more efficient training for the diffusion model. However, with end-to-end training the statistics need to be recomputed whenever the VAE model is updated - which is expensive. To address this, we propose the use of a batch" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.42, + 0.905, + 0.495 + ], + "angle": 0, + "content": "norm layer [20] which uses the exponential moving average (EMA) mean and variance as a surrogate for dataset-level statistics. The batch-norm layer thus acts as a differentiable normalization operator without the need for recomputing dataset level statistics after each optimization step." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.496, + 0.906, + 0.648 + ], + "angle": 0, + "content": "End-to-End Representation-Alignment Loss. We next enable end-to-end training, by using the REPA loss [54] for updating the parameters for both VAE and LDM during training. Formally, let \\(\\mathcal{V}_{\\phi}\\) represent the VAE, \\(\\mathcal{D}_{\\theta}\\) be the diffusion model, \\(f\\) be the fixed pretrained perceptual model (e.g., DINO-v2 [33]) for REPA [54] and \\(\\mathbf{x}\\) be a clean image. Also similar to REPA, consider \\(h_{\\omega}(\\mathbf{h}_t)\\) be the projection of diffusion transformer output \\(\\mathbf{h}_t\\) through a trainable projection layer \\(h_{\\omega}\\). 
We then perform end-to-end training by applying the REPA loss over both LDM and VAE as," + }, + { + "type": "equation", + "bbox": [ + 0.515, + 0.653, + 0.903, + 0.695 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {R E P A}} (\\theta , \\phi , \\omega) = - \\mathbb {E} _ {\\mathbf {x}, \\epsilon , t} \\left[ \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\operatorname {s i m} \\left(\\mathbf {y} ^ {[ n ]}, h _ {\\omega} \\left(\\mathbf {h} _ {t} ^ {[ n ]}\\right)\\right) \\right],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.701, + 0.905, + 0.776 + ], + "angle": 0, + "content": "where \\(\\mathbf{y} = f(\\mathbf{x})\\) is the output of the pretrained perceptual model (e.g., DINO-v2 [33]), \\(N\\) is number of patches, \\(\\mathrm{sim}(< ., . >)\\) computes the patch-wise cosine similarities between pretrained representation \\(\\mathbf{y}\\) from perceptual model (e.g., DINO-v2) and diffusion transformer hidden state \\(\\mathbf{h}_t\\)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.777, + 0.906, + 0.867 + ], + "angle": 0, + "content": "Diffusion Loss with Stop-Gradient. As discussed in Fig. 3a and §3.1, backpropagating the diffusion loss to the VAE causes a degradation of latent-space structure. To avoid this, we introduce a simple stopgrad operation which limits the application of diffusion loss \\(\\mathcal{L}_{\\mathrm{DIFF}}\\) to only the parameters \\(\\theta\\) of the latent diffusion model \\(\\mathcal{D}_{\\theta}\\)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.868, + 0.905, + 0.913 + ], + "angle": 0, + "content": "VAE Regularization Losses. 
Finally, we introduce regularization losses \\(\\mathcal{L}_{\\mathrm{REG}}\\) for VAE \\(\\nu_{\\phi}\\), to ensure that the end-to-end training process does not impact the reconstruction" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.947 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.099, + 0.078, + 0.907, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.385, + 0.907, + 0.429 + ], + "angle": 0, + "content": "Figure 4. End-to-End Tuning (REPA-E) Improves Visual Scaling. We observe that REPA-E produces higher-quality images at \\(400\\mathrm{K}\\) steps compared with the vanilla-REPA and generates more structurally meaningful images even in the early stages of training. Results for both methods are sampled using the same seed, noise and class label. We use a classifier-free guidance scale of 4.0 during sampling." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.441, + 0.483, + 0.501 + ], + "angle": 0, + "content": "performance (rFID) of the original VAE. In particular, following [1], we use three losses, 1) Reconstruction Losses \\((\\mathcal{L}_{\\mathrm{MSE}},\\mathcal{L}_{\\mathrm{LPIPS}})\\), 2) GAN Loss \\((\\mathcal{L}_{\\mathrm{GAN}})\\), 3) KL divergence loss \\((\\mathcal{L}_{\\mathrm{KL}})\\) as regularization loss \\(\\mathcal{L}_{\\mathrm{REG}}\\) for the VAE \\(\\nu_{\\phi}\\)." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.502, + 0.483, + 0.533 + ], + "angle": 0, + "content": "Overall Training. 
The overall training is then performed in an end-to-end manner using the following loss," + }, + { + "type": "equation", + "bbox": [ + 0.094, + 0.543, + 0.48, + 0.561 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} (\\theta , \\phi , \\omega) = \\mathcal {L} _ {\\mathrm {D I F F}} (\\theta) + \\lambda \\mathcal {L} _ {\\mathrm {R E P A}} (\\theta , \\phi , \\omega) + \\eta \\mathcal {L} _ {\\mathrm {R E G}} (\\phi),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.571, + 0.484, + 0.618 + ], + "angle": 0, + "content": "where \\(\\theta, \\phi, \\omega\\) refer to the parameters for the LDM, VAE and trainable REPA projection layer [54], respectively. Further implementation details are provided in §4.1 and Appendix." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.63, + 0.224, + 0.647 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.655, + 0.483, + 0.7 + ], + "angle": 0, + "content": "We next validate the performance of REPA-E and the effect of proposed components through extensive evaluation. In particular, we investigate three key research questions:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.7, + 0.483, + 0.731 + ], + "angle": 0, + "content": "1. Can REPA-E significantly improve generation performance and training speed? (Sec. 4.2, Tab. 1, Fig. 1, 4)" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.731, + 0.483, + 0.775 + ], + "angle": 0, + "content": "2. Does REPA-E generalize across variations in training settings including model-scale, architecture, encoder model for REPA etc.? (Sec. 4.3, Tab. 2, 3, 4, 5, 6, 7)" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.776, + 0.484, + 0.822 + ], + "angle": 0, + "content": "3. Analyze the impact of end-to-end tuning (REPA-E) on VAE latent-space structure and downstream generation performance. (please refer Sec. 4.4, Fig. 6, Tab. 
8, 9)" + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.7, + 0.484, + 0.822 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.83, + 0.173, + 0.847 + ], + "angle": 0, + "content": "4.1. Setup" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.852, + 0.484, + 0.914 + ], + "angle": 0, + "content": "Implementation Details. We follow the same setup as in SiT [30] and REPA [54] unless otherwise specified. All training is conducted on the ImageNet [6] training split. We adopt the same data preprocessing protocol as" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.441, + 0.907, + 0.773 + ], + "angle": 0, + "content": "in ADM [7], where original images are center-cropped and resized to \\(256 \\times 256\\) resolution. We experiment with publicly available VAEs, including SD-VAE (f8d4) [40], VA-VAE (f16d32) [40], and our own f16d32 VAE trained on ImageNet, referred to as IN-VAE. Depending on the VAE downsampling rate, we adopt SiT-XL/1 and SiT-XL/2 for \\(4 \\times\\) and \\(16 \\times\\) downsampling rates, respectively, where 1 and 2 denote the patch sizes in the transformer embedding layer. We disable affine transformations in the BN [20] layer between the VAE and SiT, relying solely on the running mean and standard deviation. The VAE regularization loss combines multiple objectives and is defined as: \\(\\mathcal{L}_{\\mathrm{REG}} = \\mathcal{L}_{\\mathrm{KL}} + \\mathcal{L}_{\\mathrm{MSE}} + \\mathcal{L}_{\\mathrm{LPIPS}} + \\mathcal{L}_{\\mathrm{GAN}}\\). For alignment loss, we use DINOv2 [33] as external visual features and apply alignment to the eighth layer of the SiT model. Empirically, we set the alignment loss coefficient to \\(\\lambda_{\\mathrm{REPA}_g} = 0.5\\) for updating SiT and \\(\\lambda_{\\mathrm{REPA}_v} = 1.5\\) for VAE. For optimization, we use AdamW [23, 29] with a constant learning rate of \\(1 \\times 10^{-4}\\), and a global batch size of 256. 
During training, we apply gradient clipping and exponential moving average (EMA) to the generative model for stable optimization. All experiments are conducted on 8 NVIDIA H100 GPUs." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.776, + 0.909, + 0.913 + ], + "angle": 0, + "content": "Evaluation. For image generation evaluation, we strictly follow the ADM setup [7]. We report generation quality using Fréchet inception distance (gFID) [17], structural FID (sFID) [31], inception score (IS) [42], precision (Prec.) and recall (Rec.) [25], measured on 50K generated images. For sampling, we follow the approach in SiT [30] and REPA [54], using the SDE Euler-Maruyama sampler with 250 steps. In terms of VAE benchmark, we measure the reconstruction FID (rFID) on 50K images from the Im" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.113, + 0.089, + 0.239, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.243, + 0.089, + 0.369, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.372, + 0.089, + 0.498, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.089, + 0.627, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.63, + 0.089, + 0.755, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.758, + 0.089, + 0.884, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.114, + 0.188, + 0.24, + 0.286 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.243, + 0.188, + 0.369, + 0.286 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.372, + 0.188, + 0.498, + 0.286 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.188, + 0.627, + 0.286 + 
], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.63, + 0.188, + 0.755, + 0.286 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.758, + 0.188, + 0.885, + 0.286 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.114, + 0.288, + 0.24, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.243, + 0.287, + 0.369, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.372, + 0.287, + 0.498, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.287, + 0.627, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.63, + 0.287, + 0.755, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.758, + 0.287, + 0.885, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.095, + 0.391, + 0.9, + 0.407 + ], + "angle": 0, + "content": "Figure 5. Qualitative Results on Imagenet \\(256 \\times 256\\) using E2E-VAE and SiT-XL. We use a classifier-free guidance scale \\(\\alpha_{\\mathrm{cfg}} = 4.0\\)." + }, + { + "type": "table", + "bbox": [ + 0.094, + 0.436, + 0.482, + 0.645 + ], + "angle": 0, + "content": "
MethodTokenizerEpochsgFID↓sFID↓IS↑
Without End-to-End Tuning
MaskDiT [56]SD-VAE16005.6910.34177.9
DiT [34]14009.626.85121.5
SiT [30]14008.616.32131.7
FasterDiT [51]4007.915.45131.3
REPA [54]SD-VAE2019.406.0667.4
4011.106.0667.4
807.905.06122.6
8005.905.73157.8
With End-to-End Tuning (Ours)
REPA-ESD-VAE*2012.835.0488.8
407.174.39123.7
804.074.60161.8
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.09, + 0.647, + 0.483, + 0.757 + ], + "angle": 0, + "content": "Table 1. REPA-E for Accelerated Generation Performance. End-to-End training with REPA-E achieves significantly better performance (lower gFID) while using fewer epochs. Notably, REPA-E with only 80 epochs surpasses vanilla REPA using \\(10 \\times\\) epochs. * indicates that VAE is updated during end-to-end training. All results are w/o classifier-free guidance on ImageNet 256 × 256. Additional system-level comparisons with classifier-free guidance and state-of-the-art results are provided in Tab. 9." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.769, + 0.447, + 0.784 + ], + "angle": 0, + "content": "ageNet [6] validation set at a resolution of \\(256 \\times 256\\)." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.797, + 0.465, + 0.813 + ], + "angle": 0, + "content": "4.2. Impact on Training Performance and Speed" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.82, + 0.483, + 0.866 + ], + "angle": 0, + "content": "We first analyze the impact of end-to-end tuning using REPA-E (Sec. 3.2) for improving generation performance and speed when training latent-diffusion transformers." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.867, + 0.484, + 0.913 + ], + "angle": 0, + "content": "Quantitative Evaluation. We compare REPA-E against various latent diffusion model (LDM) baselines in Tab. 1. We evaluate models of similar sizes (\\(\\sim\\)675M parameters)" + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.436, + 0.9, + 0.548 + ], + "angle": 0, + "content": "
Diff. ModelgFID↓sFID↓IS↑Prec.↑Rec.↑
SiT-B (130M)49.57.0027.50.460.59
+REPA-E (Ours)34.86.3139.10.570.59
SiT-L (458M)24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
SiT-XL (675M)19.46.0667.40.640.61
+REPA-E (Ours)12.85.0488.80.710.58
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.553, + 0.906, + 0.596 + ], + "angle": 0, + "content": "Table 2. Variation in Model-Scale. We find that REPA-E brings substantial performance improvements across all model-scales. All baselines are reported using vanilla-REPA [54] for training." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.608, + 0.907, + 0.82 + ], + "angle": 0, + "content": "on ImageNet \\(256 \\times 256\\) generation task. All results are reported without classifier-free guidance [18] using popular SiT-XL [30] model for training. We make two observations; 1) End-to-End tuning leads to faster training: consistently improving generation FID (gFID) from \\(19.40 \\rightarrow 12.83\\) (20 epochs), \\(11.10 \\rightarrow 7.17\\) (40 epochs), and \\(7.90 \\rightarrow 4.07\\) (80 epochs), even when comparing with REPA [54]. 2) End-to-End training leads to better final performance: REPA-E at 80 epochs surpasses FasterDiT [51] (\\(gFID = 7.91\\)) trained for 400 epochs and even MaskDiT [56], DiT [34], and SiT [30] which are trained over 1400 epochs. For instance, REPA-E reaches an FID of 4.07 within 400K steps, significantly boosting final performance over even REPA which only reaches a final FID for 5.9 after 4M steps [54]." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.822, + 0.906, + 0.913 + ], + "angle": 0, + "content": "Qualitative Evaluation. We provide qualitative comparisons between REPA [54] and REPA-E in Fig. 4. We generate images from the same noise and label using checkpoints at \\(50\\mathrm{K}\\), \\(100\\mathrm{K}\\), and \\(400\\mathrm{K}\\) training iterations, respectively. As seen in Fig. 
4, we observe that REPA-E demonstrates superior image generation quality compared to the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.092, + 0.075, + 0.416, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.286, + 0.393, + 0.3 + ], + "angle": 0, + "content": "(a) PCA Visualization of Latent Space Structure [24]" + }, + { + "type": "image", + "bbox": [ + 0.447, + 0.075, + 0.905, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.45, + 0.285, + 0.903, + 0.3 + ], + "angle": 0, + "content": "(b) Impact of End-to-End Tuning for Automatically Improving Latent Space Structure" + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.302, + 0.907, + 0.4 + ], + "angle": 0, + "content": "Figure 6. End-to-End Training Improves Latent Space Structure. (a) We observe that the latent space of pretrained VAEs can suffer either high noise components (e.g., SDXL-VAE, SD-VAE [40]), or, be over-smoothed and lack details (e.g., VA-VAE [50]). (b) The use of end-to-end tuning (\\(\\S 3.2\\)) automatically helps improve the latent space structure in a model-agnostic manner across different VAE architectures. For instance, similar to findings of concurrent work [44], we observe that SD-VAE suffers from high noise components in the latent space. Applying end-to-end training automatically helps adjust the latent space to reduce noise. In contrast, other VAEs such as recently proposed VA-VAE [50] suffer from an over-smoothed latent space. The use of end-to-end tuning with REPA-E automatically helps learn a more detailed latent-space structure to best support generation performance." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.413, + 0.479, + 0.556 + ], + "angle": 0, + "content": "
Target Repr.gFID↓sFID↓IS↑Prec.↑Rec.↑
I-JEPA-H [2]23.05.8160.30.620.60
+REPA-E (Ours)16.55.1873.60.680.60
CLIP-L [37]29.25.9846.40.590.61
+REPA-E (Ours)23.46.4457.10.620.60
DINOv2-B [33]24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
DINOv2-L [33]23.35.8959.90.610.60
+REPA-E (Ours)16.05.5977.70.680.58
" + }, + { + "type": "table_caption", + "bbox": [ + 0.091, + 0.558, + 0.483, + 0.616 + ], + "angle": 0, + "content": "Table 3. Variation in Representation Encoder. REPA-E yields consistent performance improvements across different choices for the representation-encoder used for representation-alignment [54]. All baselines are reported using vanilla-REPA [54] for training." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.624, + 0.479, + 0.737 + ], + "angle": 0, + "content": "
AutoencodergFID↓sFID↓IS↑Prec.↑Rec.↑
SD-VAE [40]24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
IN-VAE (f16d32)22.75.4756.00.620.62
+REPA-E (Ours)12.75.5784.00.690.62
VA-VAE [50]12.86.4783.80.710.58
+REPA-E (Ours)11.15.3188.80.720.61
" + }, + { + "type": "table_caption", + "bbox": [ + 0.091, + 0.738, + 0.483, + 0.767 + ], + "angle": 0, + "content": "Table 4. Variation in VAE Architecture. All baselines are reported using vanilla-REPA [54] for training." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.775, + 0.483, + 0.806 + ], + "angle": 0, + "content": "REPA baseline, while also generating more structurally meaningful images during early stages of training process." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.815, + 0.45, + 0.831 + ], + "angle": 0, + "content": "4.3. Generalization and Scalability of REPA-E" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.837, + 0.484, + 0.913 + ], + "angle": 0, + "content": "We next analyze the generalization of the proposed approach to variation in training settings including model-size, tokenizer architecture, representation encoder, alignment depth [54] etc. Unless otherwise specified, all analysis and ablations use SiT-L [30] as the generative model," + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.414, + 0.9, + 0.526 + ], + "angle": 0, + "content": "
Aln. DepthgFID↓sFID↓IS↑Prec.↑Rec.↑
6th layer23.05.7259.20.620.60
+REPA-E (Ours)16.46.6474.30.670.59
8th layer24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
10th layer23.75.9156.90.620.60
+REPA-E (Ours)16.25.2274.70.680.58
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.528, + 0.907, + 0.571 + ], + "angle": 0, + "content": "Table 5. Variation in Alignment Depth. End-to-End tuning (REPA-E) gives consistent performance imrpoements over original REPA [54] across varying alignment-depths." + }, + { + "type": "table", + "bbox": [ + 0.516, + 0.579, + 0.902, + 0.655 + ], + "angle": 0, + "content": "
ComponentgFID↓sFID↓IS↑Prec.↑Rec.↑
w/o stopgrad444.1460.31.490.000.00
w/o batch-norm18.15.3272.40.670.59
w/o LGAN19.26.4768.20.640.58
REPA-E (Ours)16.35.6975.00.680.60
" + }, + { + "type": "table_caption", + "bbox": [ + 0.527, + 0.657, + 0.892, + 0.671 + ], + "angle": 0, + "content": "Table 6. Ablation Study on Role of Different Components." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.684, + 0.907, + 0.775 + ], + "angle": 0, + "content": "SD-VAE as the VAE, and DINOv2-B [33] as the pretrained vision model for REPA loss [54]. Default REPA alignment-depth of 8 is used. We train each variant for 100K iterations and report results without classifier-free guidance [18]. All baseline numbers are reported using vanilla REPA and compared with end-to-end training using REPA-E." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.776, + 0.909, + 0.913 + ], + "angle": 0, + "content": "Impact of Model Size. Tab. 2 compares SiT-B, SiT-L, and SiT-XL to evaluate the effect of model size. We make two key observations. First, across all configurations, REPA-E consistently improves performance over the REPA baseline. Specifically, it reduces gFID from \\(49.5 \\rightarrow 34.8\\) for SiT-B, \\(24.1 \\rightarrow 16.3\\) for SiT-L, and \\(19.4 \\rightarrow 12.8\\) for SiT-XL, demonstrating the effectiveness. Second, surprisingly the percentage gains in gFID achieved with REPA-E (over REPA) improve with increasing model size. For in-" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.092, + 0.089, + 0.482, + 0.292 + ], + "angle": 0, + "content": "
MethodgFID↓sFID↓IS↑Prec.↑Rec.↑
100K Iterations (20 Epochs)
REPA [54]19.406.0667.40.640.61
REPA-E (scratch)14.127.8783.50.700.59
REPA-E (VAE init.)12.835.0488.80.710.58
200K Iterations (40 Epochs)
REPA [54]11.105.05100.40.690.64
REPA-E (scratch)7.546.17120.40.740.61
REPA-E (VAE init.)7.174.39123.70.740.62
400K Iterations (80 Epochs)
REPA [54]7.905.06122.60.700.65
REPA-E (scratch)4.344.44154.30.750.63
REPA-E (VAE init.)4.074.60161.80.760.62
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.09, + 0.295, + 0.482, + 0.38 + ], + "angle": 0, + "content": "Table 7. End-to-End Training from Scratch. We find that while initializing the VAE with pretrained weights (SD-VAE [40]) helps slightly improve performance, REPA-E can be used to train both VAE and LDM from scratch in an end-to-end manner; still achieving significantly superior performance over REPA which requires a separate stage for training VAE in addition to LDM training." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.397, + 0.482, + 0.489 + ], + "angle": 0, + "content": "stance, for SiT-B model REPA-E leads to a \\(29.6\\%\\) improvement in gFID over REPA. Surprisingly even more gains are achieved for bigger models improving gFID by \\(32.3\\%\\) and \\(34.0\\%\\) for SiT-L and SiT-XL models respectively. This trend highlights the scalability of REPA-E; larger models achieve better percentage gains over vanilla-REPA." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.494, + 0.482, + 0.614 + ], + "angle": 0, + "content": "Variation in Representation Encoder. We report results across different perception model encoders (CLIP-L, I-JEPA-H, DINOv2-B, and DINOv2-L) Tab. 3. We observe that REPA-E gives consistent performance improvements over REPA, across different choices of the perceptual encoder model. In particular, with DINOv2-B and DINOv2-L, REPA-E significantly reduces gFID from \\(24.1 \\rightarrow 16.3\\) and from \\(23.3 \\rightarrow 16.0\\), respectively." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.62, + 0.483, + 0.801 + ], + "angle": 0, + "content": "Variation in VAE. Tab. 4 evaluates the impact of different VAEs on REPA-E performance. In particular, we report results using three different VAEs 1) SD-VAE [1], 2) VA-VAE [50] and 3) IN-VAE (a \\(16\\times\\) downsampling, 32-channel VAE trained on ImageNet [6] using official training code from [40]). 
Across all variations, REPA-E consistently improves performance over the REPA baseline. REPA-E reduces gFID from \\(24.1\\rightarrow 16.3\\), from \\(22.7\\rightarrow 12.7\\) and \\(12.8\\rightarrow 11.1\\) for SD-VAE, IN-VAE and VA-VAE, respectively. The results demonstrate that REPA-E robustly improves generative quality across diverse variations in architecture, pretraining dataset and training setting of the VAE." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.807, + 0.483, + 0.913 + ], + "angle": 0, + "content": "Variation in Alignment Depth. Tab. 5 investigates the effect of applying the alignment loss at different layers of the diffusion model. We observe that REPA-E consistently enhances generation quality over the REPA baseline across variation in choice of alignment depth; with gFID improving from \\(23.0 \\rightarrow 16.4\\) (6th layer), \\(24.1 \\rightarrow 16.3\\) (8th layer), and \\(23.7 \\rightarrow 16.2\\) (10th layer)." + }, + { + "type": "table", + "bbox": [ + 0.517, + 0.089, + 0.905, + 0.28 + ], + "angle": 0, + "content": "<table><tr><td>
VAEDiffusion modelREPAgFID-50K
SD-VAE [40]DiT-XL [34]19.82
VA-VAE [50]DiT-XL [34]6.74
E2E-VAE (Ours)DiT-XL [34]6.75
SD-VAE [40]SiT-XL [30]17.20
VA-VAE [50]SiT-XL [30]5.93
E2E-VAE (Ours)SiT-XL [30]5.26
SD-VAE [40]DiT-XL [34]12.29
VA-VAE [50]DiT-XL [34]4.71
E2E-VAE (Ours)DiT-XL [34]4.20
SD-VAE [40]SiT-XL [30]7.90
VA-VAE [50]SiT-XL [30]4.88
E2E-VAE (Ours)SiT-XL [30]3.46
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.513, + 0.282, + 0.905, + 0.38 + ], + "angle": 0, + "content": "Table 8. Impact of End-to-End Tuning on VAE Performance. We find that once tuned using REPA-E, the finetuned VAEs can be used as a drop-in replacement for their original counterparts offering significantly accelerated generation performance. We fix all the VAEs and only train the diffusion models (with and w/o REPA). E2E-VAE is obtained from REPA-E fine-tuning (VA-VAE + SiT-XL). All results are reported at 80 epochs (400K iterations)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.39, + 0.905, + 0.571 + ], + "angle": 0, + "content": "Ablation on Design Components. We also perform ablation studies analyzing the importance of each component discussed in Sec. 3.2. Results are shown in Tab. 6. We observe that each component plays a key role in the final performance for REPA-E. In particular, we observe that the stop-grad operation on the diffusion loss helps prevent degradation of the latent-space structure. Similarly, the use of batch norm is useful adaptively normalizing the latent-statistics and helps improve the gFID from \\(18.09 \\rightarrow 16.3\\). Similarly, the regularization losses play a key role in maintaining the reconstruction performance of the finetuned VAE, thereby improving the gFID from \\(19.07 \\rightarrow 16.3\\)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.573, + 0.905, + 0.753 + ], + "angle": 0, + "content": "End-to-End Training from Scratch. We next analyze the impact of VAE initialization on end-to-end training. As shown in Tab. 7, we find that while initializing the VAE from pretrained weights helps slightly improve performance, REPA-E can be used to train both VAE and LDM from scratch still achieving superior performance over REPA, which technically requires a separate stage for VAE training in addition to LDM training. 
For instance, while REPA achieves a FID of 5.90 after 4M iterations, REPA-E while training entirely from scratch (for both VAE and LDM) achieves much faster and better generation FID of 4.34 within just 400K iterations." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.767, + 0.842, + 0.784 + ], + "angle": 0, + "content": "4.4. Impact of End-to-End Tuning on VAE" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.79, + 0.905, + 0.881 + ], + "angle": 0, + "content": "We next analyze the impact of end-to-end tuning on the VAE. In particular, we first show that end-to-end tuning improves the latent-space structure (Fig. 6). We next show that once tuned using REPA-E, the finetuned VAEs can be used as a drop-in replacement for their original counterparts offering significantly improved generation performance." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.883, + 0.905, + 0.913 + ], + "angle": 0, + "content": "End-to-End Training improves Latent Space Structure. Results are shown in Fig. 6. Following [24], we visu" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.091, + 0.088, + 0.907, + 0.381 + ], + "angle": 0, + "content": "
TokenizerMethodTraining Epochs#paramsrFID↓Generation w/o CFGGeneration w/ CFG
gFID↓sFID↓IS↑Prec.↑Rec.↑gFID↓sFID↓IS↑Prec.↑Rec.↑
AutoRegressive (AR)
MaskGiTMaskGIT [4]555227M2.286.18-182.10.800.51-----
VQGANLlamaGen [45]3003.1B0.599.388.24112.90.690.672.185.97263.30.810.58
VQVAEVAR [46]3502.0B------1.80-365.40.830.57
LFQ tokenizersMagViT-v2 [52]1080307M1.503.65-200.5--1.78-319.4--
LDMMAR [27]800945M0.532.35-227.80.790.621.55-303.70.810.62
Latent Diffusion Models (LDM)
SD-VAE [40]MaskDiT [56]1600675M0.615.6910.34177.90.740.602.285.67276.60.800.61
DiT [34]1400675M9.626.85121.50.670.672.274.60278.20.830.57
SiT [30]1400675M8.616.32131.70.680.672.064.50270.30.820.59
FasterDiT [51]400675M7.915.45131.30.670.692.034.63264.00.810.60
MDT [12]1300675M6.235.23143.00.710.651.794.57283.00.810.61
MDTv2 [13]1080675M-----1.584.52314.70.790.65
Representation Alignment Methods
VA-VAE [50]LightningDiT [50]80675M0.284.29---------
800675M2.054.37207.70.770.661.254.15295.30.800.65
SD-VAEREPA [54]80675M0.617.905.06122.60.700.65-----
800675M5.845.79158.70.700.681.284.68305.70.790.64
E2E-VAE (Ours)REPA80675M0.283.464.17159.80.770.631.674.12266.30.800.63
800675M1.694.17219.30.770.671.124.09302.90.790.66
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.39, + 0.907, + 0.476 + ], + "angle": 0, + "content": "Table 9. System-Level Performance on ImageNet \\(256 \\times 256\\) comparing our end-to-end tuned VAE (E2E-VAE) with other VAEs for traditional LDM training. Note that all representation alignment methods at 800 epochs are evaluated using a class-balanced sampling protocol, as detailed in App. C. We observe that in addition to improving VAE latent space structure (Fig. 6), end-to-end tuning significantly improves VAE downstream generation performance. Once tuned using REPA-E, the improved VAE can be used as drop-in replacement for their original counterparts for accelerated generation performance. Overall, our approach helps improve both LDM and VAE performance — achieving a new state-of-the-art FID of 1.12 and 0.28, respectively for LDM generation and VAE reconstruction performance." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.501, + 0.482, + 0.728 + ], + "angle": 0, + "content": "alize latent space structure using principal component analysis (PCA) that projects them to three channels colored by RGB. We consider three different VAEs: 1) SD-VAE [40], 2) IN-VAE (a \\(16\\times\\) downsampling, 32-channel VAE trained on ImageNet [6]). 3) VA-VAE from recent work from [50]. We observe that end-to-end tuning using REPA-E automatically improves the latent space structure of the original VAE. For instance, similar to findings of concurrent work [44], we observe that SD-VAE suffers from high noise components in the latent space. Applying end-to-end training automatically helps adjust the latent space to learn reduce noise. In contrast, other VAEs such as recently proposed VA-VAE [50] suffer from over-smother latent space. Application of E2E tuning automatically helps learn a more detailed latent-space to best support generation performance." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.731, + 0.483, + 0.913 + ], + "angle": 0, + "content": "End-to-End Training Improves VAE Performance. We next evaluate the impact of end-to-end tuning on downstream generation performance of the VAE. To this end, we first use end-to-end tuning for finetuning the recently proposed VA-VAE [50]. We then use the resulting end-to-end finetuned-VAE (named E2E-VAE), and compare its downstream generation performance with current state-of-the-art VAEs; including SDVAE [40] and VA-VAE [50]. To do this, we conduct traditional latent diffusion model training (w/o REPA-E), where only the generator network is updated while keeping the VAE frozen. Tab. 8 shows the comparison of VAE downstream generation across diverse train" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.501, + 0.907, + 0.609 + ], + "angle": 0, + "content": "ing settings. We observe that end-to-end tuned VAEs consistently outperform their original counterparts for downstream generation tasks across variations in LDM architecture and training settings. Interestingly, we observe that a VAE tuned using SiT-XL yields performance improvements even when using a different LDM architecture such as DiT-XL; thereby demonstrating the robustness of our approach." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.661, + 0.634, + 0.679 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.701, + 0.909, + 0.913 + ], + "angle": 0, + "content": "\"Can we unlock VAE's for performing end-to-end training with latent diffusion transformers?\" Directly backpropagating diffusion loss to the VAE is ineffective and even degrages final performance. We show that while diffusion loss is ineffective, end-to-end training can be unlocked using REPA loss. 
Our end-to-end training recipe (REPA-E), significantly improves latent-space structure, shows remarkable performance; speeding up diffusion model training by over \\(17 \\times\\) and \\(45 \\times\\) over REPA and vanilla training recipes. Overall, our approach achieves a new state-of-the-art results with generation FID of 1.12 and 1.69 with and without use of classifier-free guidance. We hope that our work can help foster further research for enabling end-to-end training with latent diffusion transformers." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.091, + 0.251, + 0.108 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.115, + 0.486, + 0.242 + ], + "angle": 0, + "content": "We would like to extend our deepest appreciation to Zeyu Zhang, Qinyu Zhao, and Zhanhao Liang for insightful discussions. We would also like to thank all reviewers for their constructive feedback. This work was supported in part by the Australian Research Council under Discovery Project DP210102801 and Future Fellowship FT240100820. SX acknowledges support from the OpenPath AI Foundation, IITP grant funded by the Korean Government (MSIT) (No. RS-2024-00457882) and NSF Award IIS-2443404." + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.272, + 0.188, + 0.287 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.297, + 0.482, + 0.338 + ], + "angle": 0, + "content": "[1] Stability AI. Improved autoencoders ... https://huggingface.co/stabilityyai/sd-vae-ft-mse, n.d. Accessed: April 11, 2025. 5, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.341, + 0.484, + 0.424 + ], + "angle": 0, + "content": "[2] Mahmoud Assran, Quentin Duval, Ishan Misra, Piotr Bojanowski, Pascal Vincent, Michael Rabbat, Yann LeCun, and Nicolas Ballas. 
Self-supervised learning from images with a joint-embedding predictive architecture. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15619–15629, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.426, + 0.484, + 0.468 + ], + "angle": 0, + "content": "[3] Dana H Ballard. Modular learning in neural networks. In Proceedings of the sixth National conference on Artificial intelligence-Volume 1, pages 279-284, 1987. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.47, + 0.484, + 0.537 + ], + "angle": 0, + "content": "[4] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.541, + 0.484, + 0.61 + ], + "angle": 0, + "content": "[5] Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. arXiv preprint arXiv:2310.00426, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.612, + 0.484, + 0.68 + ], + "angle": 0, + "content": "[6] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 2, 5, 6, 8, 9, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.684, + 0.484, + 0.725 + ], + "angle": 0, + "content": "[7] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.728, + 0.484, + 0.824 + ], + "angle": 0, + "content": "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.827, + 0.484, + 0.883 + ], + "angle": 0, + "content": "[9] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12873-12883, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.885, + 0.484, + 0.913 + ], + "angle": 0, + "content": "[10] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.297, + 0.484, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.148 + ], + "angle": 0, + "content": "Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. 3, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.15, + 0.905, + 0.219 + ], + "angle": 0, + "content": "[11] Peng Gao, Le Zhuo, Ziyi Lin, Chris Liu, Junsong Chen, Ruoyi Du, Enze Xie, Xu Luo, Longtian Qiu, Yuhang Zhang, et al. Lumina-t2x: Transforming text into any modality, resolution, and duration via flow-based large diffusion transformers. arXiv preprint arXiv:2405.05945, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.221, + 0.905, + 0.288 + ], + "angle": 0, + "content": "[12] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Masked diffusion transformer is a strong image synthesizer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23164-23173, 2023. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.291, + 0.905, + 0.344 + ], + "angle": 0, + "content": "[13] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Mdtv2: Masked diffusion transformer is a strong image synthesizer. arXiv preprint arXiv:2303.14389, 2023. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.347, + 0.905, + 0.386 + ], + "angle": 0, + "content": "[14] Ross Girshick. Fast r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 1440-1448, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.389, + 0.905, + 0.457 + ], + "angle": 0, + "content": "[15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 580-587, 2014. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.46, + 0.905, + 0.515 + ], + "angle": 0, + "content": "[16] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.517, + 0.905, + 0.584 + ], + "angle": 0, + "content": "[17] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.587, + 0.905, + 0.614 + ], + "angle": 0, + "content": "[18] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.616, + 0.905, + 0.657 + ], + "angle": 0, + "content": "[19] Minyoung Huh, Brian Cheung, Tongzhou Wang, and Phillip Isola. The platonic representation hypothesis. In International Conference on Machine Learning, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.659, + 0.905, + 0.713 + ], + "angle": 0, + "content": "[20] Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International conference on machine learning, pages 448-456. pmlr, 2015. 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.715, + 0.905, + 0.783 + ], + "angle": 0, + "content": "[21] Dongwon Kim, Ju He, Qihang Yu, Chenglin Yang, Xiaohui Shen, Suha Kwak, and Liang-Chieh Chen. Democratizing text-to-image masked generative models with compact text-aware one-dimensional tokens. arXiv preprint arXiv:2501.07730, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.786, + 0.905, + 0.813 + ], + "angle": 0, + "content": "[22] Diederik P Kingma. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.815, + 0.905, + 0.855 + ], + "angle": 0, + "content": "[23] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.857, + 0.905, + 0.913 + ], + "angle": 0, + "content": "[24] Theodoros Kouzelis, Ioannis Kakogeorgiou, Spyros Gidaris, and Nikos Komodakis. Eq-vae: Equivalence regularized latent space for improved generative image modeling. arXiv preprint arXiv:2502.09509, 2025. 
2, 3, 7, 8" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.148 + ], + "angle": 0, + "content": "[25] Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in neural information processing systems, 32, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.149, + 0.482, + 0.176 + ], + "angle": 0, + "content": "[26] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.178, + 0.482, + 0.233 + ], + "angle": 0, + "content": "[27] Tianhong Li, Yonglong Tian, He Li, Mingyang Deng, and Kaiming He. Autoregressive image generation without vector quantization. Advances in Neural Information Processing Systems, 37:56424-56445, 2025. 9, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.235, + 0.482, + 0.304 + ], + "angle": 0, + "content": "[28] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.305, + 0.482, + 0.333 + ], + "angle": 0, + "content": "[29] I Loshchilov. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.334, + 0.482, + 0.417 + ], + "angle": 0, + "content": "[30] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable interpolant transformers. 
In European Conference on Computer Vision, pages 23-40. Springer, 2024. 2, 3, 5, 6, 7, 8, 9, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.418, + 0.482, + 0.473 + ], + "angle": 0, + "content": "[31] Charlie Nash, Jacob Menick, Sander Dieleman, and Peter Battaglia. Generating images with sparse representations. In International Conference on Machine Learning, pages 7958-7968. PMLR, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.475, + 0.476, + 0.489 + ], + "angle": 0, + "content": "[32] OpenAI. Sora. https://openai.com/sora, 2024.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.491, + 0.482, + 0.572 + ], + "angle": 0, + "content": "[33] Maxime Oquab, Timothee Darct, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. Transactions on Machine Learning Research Journal, pages 1-31, 2024. 4, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.575, + 0.482, + 0.63 + ], + "angle": 0, + "content": "[34] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 3, 6, 8, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.632, + 0.482, + 0.7 + ], + "angle": 0, + "content": "[35] Pablo Pernias, Dominic Rampas, Mats Leon Richter, Christopher Pal, and Marc Aubreville. Würstchen: An efficient architecture for large-scale text-to-image diffusion models. In The Twelfth International Conference on Learning Representations, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.702, + 0.482, + 0.77 + ], + "angle": 0, + "content": "[36] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. SDXL: Improving latent diffusion models for high-resolution image synthesis. 
In The Twelfth International Conference on Learning Representations, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.772, + 0.482, + 0.854 + ], + "angle": 0, + "content": "[37] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.857, + 0.482, + 0.912 + ], + "angle": 0, + "content": "[38] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. IEEE transactions on pattern analysis and machine intelligence, 39(6):1137-1149, 2016. 1" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.092, + 0.906, + 0.147 + ], + "angle": 0, + "content": "[39] Sucheng Ren, Qihang Yu, Ju He, Xiaohui Shen, Alan Yuille, and Liang-Chieh Chen. Beyond next-token: Next-x prediction for autoregressive visual generation. arXiv preprint arXiv:2502.20388, 2025. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.149, + 0.907, + 0.23 + ], + "angle": 0, + "content": "[40] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 1, 2, 3, 4, 5, 7, 8, 9, 13, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.232, + 0.906, + 0.272 + ], + "angle": 0, + "content": "[41] Leonid I. Rudin, Stanley Osher, and Emad Fatemi. Nonlinear total variation based noise removal algorithms. Physica D: Nonlinear Phenomena, 60(1):259-268, 1992. 
13" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.274, + 0.906, + 0.327 + ], + "angle": 0, + "content": "[42] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.329, + 0.906, + 0.37 + ], + "angle": 0, + "content": "[43] Jaskirat Singh, Stephen Gould, and Liang Zheng. High-fidelity guided image synthesis with latent diffusion models. arXiv preprint arXiv:2211.17084, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.371, + 0.906, + 0.425 + ], + "angle": 0, + "content": "[44] Ivan Skorokhodov, Sharath Girish, Benran Hu, Willi Menapace, Yanyu Li, Rameen Abdal, Sergey Tulyakov, and Aliaksandr Siarohin. Improving the diffusability of autoencoders. arXiv preprint arXiv:2502.14831, 2025. 2, 7, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.427, + 0.906, + 0.481 + ], + "angle": 0, + "content": "[45] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.482, + 0.906, + 0.537 + ], + "angle": 0, + "content": "[46] Keyu Tian, Yi Jiang, Zehuan Yuan, Bingyue Peng, and Liwei Wang. Visual autoregressive modeling: Scalable image generation via next-scale prediction. Advances in neural information processing systems, 37:84839-84865, 2025. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.538, + 0.906, + 0.592 + ], + "angle": 0, + "content": "[47] Arash Vahdat, Karsten Kreis, and Jan Kautz. Score-based generative modeling in latent space. In Advances in Neural Information Processing Systems, pages 11287-11302. Curran Associates, Inc., 2021. 
3, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.594, + 0.906, + 0.634 + ], + "angle": 0, + "content": "[48] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.635, + 0.906, + 0.675 + ], + "angle": 0, + "content": "[49] Shuai Wang, Zhi Tian, Weilin Huang, and Limin Wang. Ddt: Decoupled diffusion transformer. arXiv preprint arXiv:2504.05741, 2025. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.677, + 0.906, + 0.717 + ], + "angle": 0, + "content": "[50] Jingfeng Yao and Xinggang Wang. Reconstruction vs. generation: Taming optimization dilemma in latent diffusion models. arXiv preprint arXiv:2501.01423, 2025. 3, 7, 8, 9, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.718, + 0.906, + 0.772 + ], + "angle": 0, + "content": "[51] Jingfeng Yao, Wang Cheng, Wenyu Liu, and Xinggang Wang. Fasteredit: Towards faster diffusion transformers training without architecture modification. arXiv preprint arXiv:2410.10356, 2024. 6, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.774, + 0.906, + 0.842 + ], + "angle": 0, + "content": "[52] Lijun Yu, José Lezama, Nitesh B Gundavarapu, Luca Versari, Kihyuk Sohn, David Minnen, Yong Cheng, Vighnesh Birodkar, Agrim Gupta, Xiuye Gu, et al. Language model beats diffusion-tokenizer is key to visual generation. arXiv preprint arXiv:2310.05737, 2023. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.843, + 0.906, + 0.911 + ], + "angle": 0, + "content": "[53] Qihang Yu, Mark Weber, Xueqing Deng, Xiaohui Shen, Daniel Cremers, and Liang-Chieh Chen. An image is worth 32 tokens for reconstruction and generation. Advances in Neural Information Processing Systems, 37:128940-128966, 2025. 
3" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.907, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.161 + ], + "angle": 0, + "content": "[54] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. arXiv preprint arXiv:2410.06940, 2024. 2, 3, 4, 5, 6, 7, 8, 9, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.164, + 0.482, + 0.218 + ], + "angle": 0, + "content": "[55] Kaiwen Zha, Lijun Yu, Alireza Fathi, David A Ross, Cordelia Schmid, Dina Katabi, and Xiuye Gu. Language-guided image tokenization for generation. arXiv preprint arXiv:2412.05796, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.22, + 0.482, + 0.261 + ], + "angle": 0, + "content": "[56] Hongkai Zheng, Weili Nie, Arash Vahdat, and Anima Anandkumar. Fast training of diffusion models with masked transformers. arXiv preprint arXiv:2306.09305, 2023. 6, 9" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.142, + 0.086, + 0.856, + 0.13 + ], + "angle": 0, + "content": "REPA-E: Unlocking VAE for End-to-End Tuning with Latent Diffusion Transformers" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.142, + 0.615, + 0.163 + ], + "angle": 0, + "content": "Supplementary Material" + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.178, + 0.48, + 0.25 + ], + "angle": 0, + "content": "
Training StrategySpatial VarianceTotal Variation
w/o E2E Tuning17.066627.35
E2E w/ REPA Loss18.025516.14
E2E w/ Diff. Loss0.0289.80
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.09, + 0.26, + 0.483, + 0.414 + ], + "angle": 0, + "content": "Table 10. Impact of Naive End-to-End Training with Diffusion Loss. We report total variation [41] and mean variance along each VAE latent channel for three training settings: 1) Standard LDM training (w/o end-to-end (E2E) tuning), 2) Naive E2E tuning with Diffusion loss, 3) E2E tuning with REPA loss [54]. All experiments use SDVAE for VAE initialization. We observe that using diffusion loss for end-to-end tuning encourages learning a simpler latent space with lower variance along the spatial dimensions (Fig. 3a). The simpler latent space is easier for denoising objective (\\(\\S 3.1\\)), but degrades final generation performance (Fig. 1). All results are reported at 400K iterations with SiT-XL/2 [30] as LDM." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.439, + 0.466, + 0.456 + ], + "angle": 0, + "content": "A. Impact of Diffusion Loss on Latent Space" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.464, + 0.483, + 0.614 + ], + "angle": 0, + "content": "We analyze the effect of naively using diffusion loss for end-to-end tuning, focusing on how it alters the latent space structure. All experiments here use SD-VAE for tokenizer initialization and SiT-XL/2 [30] as the latent diffusion model, trained for 400K iterations without classifier-free guidance. We report two metrics to quantify latent structure, 1) Spatial Variance, computed as the mean per-channel variance across spatial dimensions, and 2) Total Variation [41], which captures local spatial differences in the latent map." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.615, + 0.483, + 0.722 + ], + "angle": 0, + "content": "As shown in Tab. 10 and Fig. 3, directly backpropagating the diffusion loss leads to reduced spatial variance, which creates an easier denoising problem by hacking the latent space but leads to reduced image generation performance. 
In contrast, end-to-end training with REPA-E not only leads to improved generation performance but also improves the latent space structure for the underlying VAE (Fig. 3, 6)." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.735, + 0.285, + 0.753 + ], + "angle": 0, + "content": "B. Additional Analysis" + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.774, + 0.483, + 0.845 + ], + "angle": 0, + "content": "
MethodgFID ↓sFID ↓IS ↑Prec. ↑Rec. ↑
REPA + E2E-Diffusion444.1460.31.490.000.00
REPA + E2E-LSGM9.895.07107.50.720.61
REPA-E (Ours)4.074.60161.80.760.62
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.091, + 0.847, + 0.483, + 0.876 + ], + "angle": 0, + "content": "Table 11. Comparison with LSGM Objective. REPA-E shows better generation performance and convergence speed." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.882, + 0.484, + 0.913 + ], + "angle": 0, + "content": "Comparison of End-to-End Training Objectives. We provide additional results comparing different objectives for" + }, + { + "type": "table", + "bbox": [ + 0.515, + 0.178, + 0.908, + 0.236 + ], + "angle": 0, + "content": "
MethodgFID↓sFID↓IS↑Prec.↑Rec.↑
REPA + SiT-L22.25.6858.30.740.60
REPA-E + SiT-L12.84.6090.60.790.61
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.513, + 0.238, + 0.907, + 0.307 + ], + "angle": 0, + "content": "Table 12. Scaling REPA-E to Higher Resolution. System-level results on ImageNet-512 with \\(64 \\times 64\\) latents using SiT-L at 100K steps without classifier-free guidance. We observe that REPA-E leads to significant performance improvements over vanilla-REPA [54] even at high resolutions." + }, + { + "type": "table", + "bbox": [ + 0.516, + 0.318, + 0.905, + 0.37 + ], + "angle": 0, + "content": "
SamplerODE, NFE=50SDE, NFE=250
gFIDVA-VAE 5.43E2E-VAE 5.02VA-VAE 5.57E2E-VAE 4.97
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.513, + 0.372, + 0.906, + 0.428 + ], + "angle": 0, + "content": "Table 13. Generalization to T2I Tasks. FID results on MSCOCO text-to-image generation using MMDiT + REPA. We find that end-to-end tuned VAEs (E2E-VAE) also generalizes to T2I tasks showing improved generation performance." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.451, + 0.906, + 0.542 + ], + "angle": 0, + "content": "end-to-end training of VAE and LDM. Specifically, we evaluate: 1) naive E2E training by backpropagating diffusion loss to VAE encoder, 2) the LSGM entropy-regularized objective [47], 3) our proposed REPA-E. All methods are trained with SiT-XL for 400K steps under consistent settings." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.544, + 0.906, + 0.621 + ], + "angle": 0, + "content": "The LSGM objective prevents feature collapse by maximizing entropy of the latent space. However, as shown in Tab. 11, our REPA-E formulation yields better performance across all metrics at just \\(400\\mathrm{K}\\) steps, with significantly faster convergence and stronger generation quality." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.622, + 0.906, + 0.744 + ], + "angle": 0, + "content": "Scaling REPA-E to Higher Latent Resolution. We conduct experiments on ImageNet-512 [6] to evaluate the performance of REPA-E under higher-resolution latent settings \\((64 \\times 64)\\). We use SD-VAE [40] as the tokenizer and SiT-L as the diffusion model, trained for 100K steps and we report the performance without classifier-free guidance. As shown in Tab. 12, our approach yields significant improvements in generation quality compared to REPA." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.746, + 0.907, + 0.913 + ], + "angle": 0, + "content": "MSCOCO Text-to-Image Generation with E2E-VAE. To further evaluate the utility of the tuned VAE beyond ImageNet, we assess its performance in a text-to-image generation (T2I) setting on MSCOCO [28]. 
Following REPA [54], we adopt MMDiT [10] as the diffusion backbone and apply REPA loss across all variants. All models are trained for 100K steps and evaluated using classifier-free guidance with \\(\\alpha_{\\mathrm{cfg}} = 2.0\\) and EMA weights during inference. We report generation FID, and observe that replacing VA-VAE with our E2E-VAE consistently improves downstream text-to-image generation quality (Tab. 13)." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.092, + 0.089, + 0.482, + 0.214 + ], + "angle": 0, + "content": "
AutoencoderPSNR↑SSIM↑LPIPS↓rFID↓
SD-VAE [40]25.670.720.130.74
+REPA-E (Ours)24.840.710.150.53
IN-VAE (f16d32)27.400.800.090.26
+REPA-E (Ours)26.870.780.110.27
VA-VAE [50]26.320.760.110.28
+REPA-E (Ours)26.250.750.110.28
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.093, + 0.218, + 0.483, + 0.273 + ], + "angle": 0, + "content": "Table 14. VAE Reconstruction Evaluation on ImageNet-256. While REPA-E primarily improves the generative capability of the VAE (see Tab. 9), it also maintains competitive reconstruction quality across all metrics." + }, + { + "type": "title", + "bbox": [ + 0.095, + 0.299, + 0.355, + 0.315 + ], + "angle": 0, + "content": "C. Remarks on FID Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.324, + 0.482, + 0.505 + ], + "angle": 0, + "content": "Throughout the paper, we follow the standard ImageNet conditional evaluation protocol, where 50,000 images are generated by randomly sampling class labels. Recent papers [27, 39, 49] have adopted class-balanced generation for evaluation, where 50 images per class are generated across the 1,000 categories. To our surprise, we found that using class-balanced sampling yields slightly better FID performance. Therefore, for the results in Tab. 9, we adopt this class-balanced sampling strategy. Accordingly, all representation alignment methods at the 800-epoch checkpoint in this table are computed under the balanced sampling protocol to ensure a fair and consistent comparison." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "14" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10483/9b7bb575-f36f-48cf-a562-8fb8239c8a45_origin.pdf b/data/2025/2504_10xxx/2504.10483/9b7bb575-f36f-48cf-a562-8fb8239c8a45_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..071b06ce51d67e461b5a79d04a00f94d0a086e82 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/9b7bb575-f36f-48cf-a562-8fb8239c8a45_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9df68470aca002de3eca5dd353c120ec3fce86e7275967da09f095a12b2e712b +size 2729401 diff --git a/data/2025/2504_10xxx/2504.10483/full.md b/data/2025/2504_10xxx/2504.10483/full.md new file mode 100644 index 0000000000000000000000000000000000000000..b61d973d0dfe3686157b962646013ae6297b2275 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/full.md @@ -0,0 +1,410 @@ +# REPA-E: Unlocking VAE for End-to-End Tuning with Latent Diffusion Transformers + +Xingjian Leng $^{\alpha \star}$ Jaskirat Singh $^{\alpha \star}$ Yunzhong Hou $^{\alpha}$ Zhenchang Xing $^{\beta}$ Saining Xie $^{\chi}$ Liang Zheng $^{\alpha}$ + +$^{\alpha}$ Australian National University $\beta$ Data61 CSIRO $\chi$ New York University + +{xingjian.length\*, jaskirat.singh\*, yunzhong.hou, liang.zheng}@anu.edu.au + +zhenchang.xing@data61.csiro.au saining.xie@nyu.edu + +![](images/6e3d867ad461854a2b4897fe73b4d029c7a1b0fe01e5b82ec27c5b3c3fbac347.jpg) +a) Traditional LDM Training +Figure 1. Can we unlock VAE for end-to-end tuning with latent-diffusion models? - Traditional deep learning wisdom dictates that end-to-end training is often preferable when possible. However, latent diffusion models usually only update the generator network while keeping the variational auto-encoder (VAE) fixed (a). This is because directly using the diffusion loss to update the VAE (b) causes the latent space to collapse. 
We show that while direct diffusion-loss is ineffective, end-to-end training can be unlocked through the representation-alignment (REPA) loss - allowing both encoder and diffusion model to be jointly tuned during the training process (c). Notably, this allows for significantly accelerated training; speeding up training by over $17\times$ and $45\times$ over REPA and vanilla training recipes, respectively (d). + +![](images/a9541f9988db720abb731ca660a0d104cdb172ec8d219145d7c412f88112e0a4.jpg) +b) Naive End-to-End LDM Training +c) REPA-E (Ours) + +![](images/3d0d5d48be88986c8ac48ee99088103e4823610d4c41d445054fcca80dcf74c9.jpg) +d) Training Steps vs. FID-50K Improved Generation Performance + +# Abstract + +In this paper we tackle a fundamental question: "Can we train latent diffusion models together with the variational auto-encoder (VAE) tokenizer in an end-to-end manner?" Traditional deep-learning wisdom dictates that end-to-end training is often preferable when possible. However, for latent diffusion transformers, it is observed that end-to-end training both VAE and diffusion-model using standard diffusion-loss is ineffective, even causing a degradation in final performance. We show that while diffusion loss is ineffective, end-to-end training can be unlocked through the representation-alignment (REPA) loss - allowing both VAE and diffusion model to be jointly tuned during the training process. Despite its simplicity, the proposed training recipe (REPA-E) shows remarkable performance; speeding up diffusion model training by over $17 \times$ and $45 \times$ over REPA and + +vanilla training recipes, respectively. Interestingly, we observe that end-to-end tuning with REPA-E also improves the VAE itself; leading to improved latent space structure and downstream generation performance. In terms of final performance, our approach sets a new state-of-the-art; achieving FID of 1.12 and 1.69 with and without classifier-free guidance on ImageNet $256 \times 256$ . 
Code is available at https://end2end-diffusion.github.io. + +# 1. Introduction + +End-to-end training has propelled the field forward for the past decade. It is understood that incorporating more components into end-to-end training can lead to increased performance, as evidenced by the evolution of the RCNN family [14, 15, 38]. With that said, training schemes of latent diffusion models (LDMs) [40] remain two-stage: first, the variational auto-encoder (VAE) [22] is trained with the re + +![](images/58248ecea17233f3151ed2c3fa2b6660f014b9cc50ef2d343f2fafeecf08c27b.jpg) +(a) PCA Analysis on VAE Latent Space Structure + +![](images/6ff1982f4ad41e9b7eb323cd44e02190df89a9a8943279c88efeddf464d1cd62.jpg) +(b) Performance Improvements with REPA-E (400K Steps) +Figure 2. End-to-End Training Automatically Improves VAE Latent-Space Structure. (a) Following [24], we visualize latent space structure from different VAEs before and after end-to-end training using principal component analysis (PCA) that projects them to three channels colored by RGB. We consider SD-VAE [40], and IN-VAE $^1$ , a $16 \times$ downsampling, 32-channel VAE trained on ImageNet [6]. For SD-VAE we find that latent representations have high-frequency noise. Applying end-to-end tuning helps learning a more smooth and less noisy latent representation. Interestingly to the contrast, the latent space for IN-VAE is over-smoothed (e.g., row-2). Applying end-to-end tuning automatically helps learn a more detailed latent space structure to best support final generation performance. (b) Jointly tuning both VAE and latent diffusion model (LDM) significantly improves final generation performance (gFID) across different VAE architectures. + +construction loss; then, the diffusion model is trained with the diffusion loss while keeping the VAE fixed (see Fig. 1a). 

The above two-stage division of the LDM training process, though popular, leads to a challenging optimization task: "How to best optimize the representation from first stage (VAE) for optimal performance while training the second stage (diffusion model)?" While recent works study the interplay between the performance of the two stages [24, 44], they are often limited to empirical analysis, which may vary depending on the architecture and training setting for both the VAE and the diffusion model. For instance, a concurrent work [44] shows that the latent space of popular autoencoders e.g., SD-VAE [40] suffers from high-frequency noise / components. However, as seen in Fig. 2 & 6, while the same holds for some VAEs (e.g. SD-VAE), it might not be true for other VAE architectures — which instead might suffer from an over-smoothed latent space (Fig. 2, 6).

In this paper, we therefore ask a fundamental question: "Can we jointly tune both VAE and LDM in an end-to-end manner to best optimize final generation performance?" Technically, it is straightforward to do end-to-end LDM training by simply back-propagating the diffusion loss to the VAE tokenizer. However, experiments (§3) reveal that this naive approach for end-to-end training is ineffective. The diffusion loss encourages learning a simpler latent space structure which is easier for denoising objective (refer §3.1), but leads to reduced generation performance (Fig. 1d).

To address this, we propose REPA-E; an end-to-end training recipe using representation alignment loss [54]. We show that while the diffusion loss is ineffective, end-to-end tuning can be unlocked through the recently proposed representation-alignment (REPA) loss - allowing both VAE

and diffusion model to be jointly tuned during the training process. 
Through extensive evaluations, we demonstrate that end-to-end tuning with REPA-E offers several advantages; End-to-End Training Leads to Accelerated Generation Performance; speeding up diffusion training by over $17 \times$ and $45 \times$ over REPA and vanilla training recipes (Fig. 1d). Furthermore, it also helps significantly improve the final generation performance. For instance as seen in Fig. 1d, we find that when using the popular SiT-XL [30] architecture, REPA-E reaches an FID of 4.07 within 400K steps, significantly boosting final performance over even REPA which only reaches a final FID of 5.9 after 4M steps [54].

End-to-End Training improves VAE latent-space structure. As seen in Fig. 2 and §4.4, we find that jointly tuning the VAE and latent diffusion model during training, automatically improves the latent space structure across different VAE architectures. For instance, for SD-VAE [40], it is observed that the original latent space suffers from high-frequency noise (Fig. 2). Applying end-to-end tuning helps learn a more smooth latent space representation. In contrast, the latent space for IN-VAE1 is over-smoothed. Applying REPA-E automatically helps learn more detailed latent space structure to best support generation performance. End-to-End Tuning Improves VAE Performance. Finally, we find that once tuned using REPA-E, the end-to-end tuned VAE can be used as a drop-in replacement for their original counterparts (e.g. SD-VAE) showing improved generation performance across diverse training settings and model architectures (refer §4.4).

To summarize, key contributions of this paper are: 1) We propose REPA-E; an end-to-end training recipe for jointly

tuning both VAE and LDM using representation alignment loss (§3). 2) We find that despite its simplicity, REPA-E leads to accelerated generation performance; speeding up diffusion training by over $17 \times$ and $45 \times$ over REPA and vanilla training recipes, respectively (§4.2). 
3) We show that end-to-end training is able to adaptively improve the latent space structure across diverse VAE architectures. 4) We demonstrate that once tuned using REPA-E, the end-to-end tuned VAE can be used as a drop-in replacement for their original counterparts (e.g., SD-VAE), exhibiting significantly better downstream generation performance (§4.4).

# 2. Related Work

Tokenizer or autoencoders (AE) [3] use either the variational objective [22] for continuous tokenization or a vector quantization objective [9, 48] for discrete tokenization [8-10, 16, 21, 22, 36, 40, 48, 53, 55]. However, current tokenizers are primarily trained for minimizing the reconstruction error, which may not provide the optimal latent space for generation [24]. We show that improved latent space structure is achieved by end-to-end training of LDMs.

Latent diffusion models leverage pre-trained image tokenizers to compress images into a lower-dimensional latent space to simplify the generative task [5, 10, 10, 11, 26, 32, 36, 40, 43, 47]. Despite their effectiveness, existing tokenizers and diffusion models are trained separately [10, 36, 40]. In this paper, we explore jointly optimizing tokenizers and diffusion models to achieve faster convergence and improved generation performance (Sec. 4).

Representation alignment for generative learning has recently shown huge promise for improving the training speed and performance of diffusion models [35, 50, 54]. We find that instead of applying the REPA loss separately over LDM [54] or VAE [50], significantly better performance and training speed can be achieved through E2E training.

End-to-End Diffusion. LSGM [47] explores joint training with score-based generative models, which uses a variational lower bound objective with an entropy term for preventing latent space collapse while backpropagating the diffusion loss. 
We empirically find that while this helps prevent latent space collapse, REPA-E shows significantly faster convergence during E2E training (refer App. B). + +# 3. REPA-E: Unlocking VAE for Joint Training + +Overview. Given a variational autoencoder (VAE) and latent diffusion transformer (e.g., SiT [30]), we wish to jointly tune the VAE latent representation and diffusion model features in an end-to-end manner to best optimize the final generation performance. To this end, we first make three key insights in §3.1: 1) Naive end-to-end tuning - directly back-propagating the diffusion loss to the VAE is ineffective. The diffusion loss encourages learning a more simpler latent space structure (Fig. 3a) which is easier for min + +imizing the denoising objective [40], but degrades the final generation performance. We next analyze the recently proposed representation-alignment loss [54] showing that; 2) Higher representation-alignment score [54] correlates with improved generation performance (Fig. 3b). This offers an alternate path for improving final generation performance using representation-alignment score as a proxy. 3) The maximum achievable alignment score with vanilla-REPA is bottlenecked by the VAE latent space features. We further show that backpropagating the REPA loss to the VAE during training can help address this limitation, significantly improving final representation-alignment score (Fig. 3c). + +Given the above insights, we finally propose REPA-E (§3.2); an end-to-end tuning recipe for both VAE and LDM features. Our key idea is simple: instead of directly using diffusion loss for end-to-end tuning, we can use the representation alignment score as a proxy for the final generation performance. This motivates our final approach, where instead of the diffusion loss, we propose to perform end-to-end training using the representation-alignment loss. The end-to-end training with REPA loss helps better improve the final representation-alignment score (Fig. 
3b), which in turn leads to improved final generation performance (§3.1).

# 3.1. Motivating End-to-End Training with REPA

Naive End-to-End Tuning is Ineffective. We first analyze the naive approach for end-to-end tuning; directly backpropagating the diffusion loss to the VAE tokenizer. As shown in Fig. 3a, we observe that directly backpropagating the diffusion loss encourages learning a simpler latent space structure with lower variance along the spatial dimensions (Tab. 10). The simpler latent-space structure poses an easier problem for the denoising objective [40], but leads to reduced generation performance (Fig. 1). Consider an intermediate latent $z_{t} = \alpha_{t}z_{\mathrm{VAE}} + \sigma_{t}\epsilon_{orig}$ for any timestep $t$ . The denoising objective [34] mainly aims to predict $\epsilon_{pred}$ ; estimating the originally added noise $\epsilon_{orig}$ from VAE features $z_{\mathrm{VAE}}$ and timestep $t$ . As the variance along the spatial dimensions for VAE latent $z_{\mathrm{VAE}}$ goes down, the denoising objective effectively reduces to predicting a bias term for recovering back the originally added noise $\epsilon_{orig}$ . Thus, backpropagating the diffusion loss effectively hacks the latent space structure to create an easier denoising problem, but leads to a reduced generation performance (Fig. 1). Higher Representation Alignment Correlates with Better Generation Performance. Similar to the findings of [54], we also measure representation alignment using CKNNA scores [19] across different model sizes and training iterations. As seen in Fig. 3b, we observe that higher representation alignment during the training process leads to improved generation performance. This suggests an alternate path for improving generation performance by using the representation alignment objective instead of the diffusion loss for end-to-end training (refer §3.2). 
+ +![](images/aed0aebb7aa7f26a90775c3d741b04e5ee30a74fde9c4116c849efd5d8dfb03f.jpg) +RGB +Image + +![](images/678247a76edebf75eef996545bf9f71dcd7acf3febf0a69f2a6874afe513f021.jpg) +SDVAE +w/o E2E + +![](images/737ee93bcfc4726d031fc27ae03451e0ae5dc24d68c85911901ac5255cefdf3a.jpg) +E2E with +REPA Loss + +![](images/ecca1ad26ed6740024c2b424367c42553d5a44b65aea95da9b3a313377175628.jpg) +E2E with +Diff, Loss + +![](images/3a884ef9a7201faa6ce809763a9801e6306cc17f1db558b545d4255c6db7f8fb.jpg) + +![](images/3b2ef12788c4a2120146d9f5d497dc212500338f2b6db3d99890fedb7710f1b6.jpg) + +![](images/6d94cb8882bfecafaab1bbb1acfc06fe066cc5249188812276c3541185a1e8ca.jpg) + +![](images/2062b30ebd77b104482e0ace1af3c36cfdc092c6d22e5d9f827fdc42a5cdf62f.jpg) + +![](images/026a2509afc429c08ebf20dbfc03dde3f7f9882b223f893a1155f70e028d28db.jpg) +(a) PCA Visualization of Latent Spaces + +![](images/d1e9c9c23709cb744c68d3c891fec414dcd095ab25592add4488c6349b77a306.jpg) + +![](images/ae8c4a8f75e3bc72d7182c5a9a929d9d0aa2a4238ec623bfea7b8c6ef63012e0.jpg) + +![](images/96122cd52837ce36699d9fc4d592f85bbb3c75af36fa0f639b4dd5f6896b3775.jpg) + +![](images/4084e4f8a694c979c3b8b8e5e18a0d0a6961457d400ae16e873a14a36c04d873.jpg) +(b) Correlation: gFID & CKNNA Score + +![](images/3af1810a7f98b4e4da924a364be17b210eb220e9b0b6c4f9bb76ce63a6aa8af7.jpg) +(c) E2E tuning with REPA improves CKNNA Score +Figure 3. Motivating End-to-End Tuning using Representation Alignment (REPA) Loss. We make three key insights: 1) Naive end-to-end (E2E) tuning using diffusion loss is ineffective. The diffusion encourages learning a more simpler latent space structure (a) which is easier for denoising objective (refer §3.1) but degrades final generation performance (Fig. 1). We next analyze the recently proposed representation alignment (REPA) loss [54] showing: 2) Higher representation alignment (CKNNA) leads to better generation performance. 
This suggests an alternate path for improving performance by using representation-alignment (CKNNA) as a proxy for generation performance. 3) The maximum achievable CKNNA score with vanilla-REPA is bottlenecked by the VAE features (c) saturating around $\sim 0.42$ . Back-propagating the REPA-loss to the VAE helps address this limitation and improve the final CKNNA score. Given the above insights: we propose REPA-E ( $\S 3.2$ ) for end-to-end LDM training. The key idea is simple: instead of using the diffusion loss, we perform end-to-end training using the REPA loss. The end-to-end training with REPA loss helps improve the final representation-alignment (CKNNA), which in turn leads to improved generation performance ( $\S 4$ ).

Representation Alignment is Bottlenecked by the VAE Features. Fig. 3c shows that while the naive application of REPA loss [54] leads to improved representation-alignment (CKNNA) score, the maximum achievable alignment score is still bottlenecked by the VAE features saturating around a value of 0.4 (maximum value of 1). Furthermore, we find that backpropagating the representation-alignment loss to the VAE helps address this limitation; allowing end-to-end optimization of the VAE features to best support representation-alignment objective [54].

# 3.2. End-to-End Training with REPA

Given the above insights, we next propose REPA-E (§3.2); an end-to-end tuning recipe for jointly training both VAE and LDM features. Instead of directly using diffusion loss, we propose to perform end-to-end training using the representation-alignment loss. The end-to-end training with REPA loss helps better improve the final representation-alignment score (Fig. 3c), which in turn leads to improved final generation performance (refer §4.2). We next discuss key details for implementation of REPA-E for training.

Batch-Norm Layer for VAE Latent Normalization. 
To enable end-to-end training, we first introduce a batchnorm layer between the VAE and latent diffusion model (Fig. 1). Typical LDM training involves normalizing the VAE features using precomputed latent statistics (e.g., std $= 1 / 0.1825$ for SD-VAE [40]). This helps normalize the VAE latent outputs to zero mean and unit variance for more efficient training for the diffusion model. However, with end-to-end training the statistics need to be recomputed whenever the VAE model is updated - which is expensive. To address this, we propose the use of a batch + +norm layer [20] which uses the exponential moving average (EMA) mean and variance as a surrogate for dataset-level statistics. The batch-norm layer thus acts as a differentiable normalization operator without the need for recomputing dataset level statistics after each optimization step. + +End-to-End Representation-Alignment Loss. We next enable end-to-end training, by using the REPA loss [54] for updating the parameters for both VAE and LDM during training. Formally, let $\mathcal{V}_{\phi}$ represent the VAE, $\mathcal{D}_{\theta}$ be the diffusion model, $f$ be the fixed pretrained perceptual model (e.g., DINO-v2 [33]) for REPA [54] and $\mathbf{x}$ be a clean image. Also similar to REPA, consider $h_{\omega}(\mathbf{h}_t)$ be the projection of diffusion transformer output $\mathbf{h}_t$ through a trainable projection layer $h_{\omega}$ . We then perform end-to-end training by applying the REPA loss over both LDM and VAE as, + +$$ +\mathcal {L} _ {\mathrm {R E P A}} (\theta , \phi , \omega) = - \mathbb {E} _ {\mathbf {x}, \epsilon , t} \left[ \frac {1}{N} \sum_ {n = 1} ^ {N} \operatorname {s i m} \left(\mathbf {y} ^ {[ n ]}, h _ {\omega} \left(\mathbf {h} _ {t} ^ {[ n ]}\right)\right) \right], +$$ + +where $\mathbf{y} = f(\mathbf{x})$ is the output of the pretrained perceptual model (e.g., DINO-v2 [33]), $N$ is number of patches, $\mathrm{sim}(< ., . 
>)$ computes the patch-wise cosine similarities between pretrained representation $\mathbf{y}$ from perceptual model (e.g., DINO-v2) and diffusion transformer hidden state $\mathbf{h}_t$ . + +Diffusion Loss with Stop-Gradient. As discussed in Fig. 3a and §3.1, backpropagating the diffusion loss to the VAE causes a degradation of latent-space structure. To avoid this, we introduce a simple stopgrad operation which limits the application of diffusion loss $\mathcal{L}_{\mathrm{DIFF}}$ to only the parameters $\theta$ of the latent diffusion model $\mathcal{D}_{\theta}$ . + +VAE Regularization Losses. Finally, we introduce regularization losses $\mathcal{L}_{\mathrm{REG}}$ for VAE $\nu_{\phi}$ , to ensure that the end-to-end training process does not impact the reconstruction + +![](images/b0f6d7040a425954eeab73ec3015199d815cfcfb9afc1f1b22d8e551e28f6b7f.jpg) +Figure 4. End-to-End Tuning (REPA-E) Improves Visual Scaling. We observe that REPA-E produces higher-quality images at $400\mathrm{K}$ steps compared with the vanilla-REPA and generates more structurally meaningful images even in the early stages of training. Results for both methods are sampled using the same seed, noise and class label. We use a classifier-free guidance scale of 4.0 during sampling. + +performance (rFID) of the original VAE. In particular, following [1], we use three losses, 1) Reconstruction Losses $(\mathcal{L}_{\mathrm{MSE}},\mathcal{L}_{\mathrm{LPIPS}})$ , 2) GAN Loss $(\mathcal{L}_{\mathrm{GAN}})$ , 3) KL divergence loss $(\mathcal{L}_{\mathrm{KL}})$ as regularization loss $\mathcal{L}_{\mathrm{REG}}$ for the VAE $\nu_{\phi}$ . + +Overall Training. 
The overall training is then performed in an end-to-end manner using the following loss, + +$$ +\mathcal {L} (\theta , \phi , \omega) = \mathcal {L} _ {\mathrm {D I F F}} (\theta) + \lambda \mathcal {L} _ {\mathrm {R E P A}} (\theta , \phi , \omega) + \eta \mathcal {L} _ {\mathrm {R E G}} (\phi), +$$ + +where $\theta, \phi, \omega$ refer to the parameters for the LDM, VAE and trainable REPA projection layer [54], respectively. Further implementation details are provided in §4.1 and Appendix. + +# 4. Experiments + +We next validate the performance of REPA-E and the effect of proposed components through extensive evaluation. In particular, we investigate three key research questions: + +1. Can REPA-E significantly improve generation performance and training speed? (Sec. 4.2, Tab. 1, Fig. 1, 4) +2. Does REPA-E generalize across variations in training settings including model-scale, architecture, encoder model for REPA etc.? (Sec. 4.3, Tab. 2, 3, 4, 5, 6, 7) +3. Analyze the impact of end-to-end tuning (REPA-E) on VAE latent-space structure and downstream generation performance. (please refer Sec. 4.4, Fig. 6, Tab. 8, 9) + +# 4.1. Setup + +Implementation Details. We follow the same setup as in SiT [30] and REPA [54] unless otherwise specified. All training is conducted on the ImageNet [6] training split. We adopt the same data preprocessing protocol as + +in ADM [7], where original images are center-cropped and resized to $256 \times 256$ resolution. We experiment with publicly available VAEs, including SD-VAE (f8d4) [40], VA-VAE (f16d32) [40], and our own f16d32 VAE trained on ImageNet, referred to as IN-VAE. Depending on the VAE downsampling rate, we adopt SiT-XL/1 and SiT-XL/2 for $4 \times$ and $16 \times$ downsampling rates, respectively, where 1 and 2 denote the patch sizes in the transformer embedding layer. We disable affine transformations in the BN [20] layer between the VAE and SiT, relying solely on the running mean and standard deviation. 
The VAE regularization loss combines multiple objectives and is defined as: $\mathcal{L}_{\mathrm{REG}} = \mathcal{L}_{\mathrm{KL}} + \mathcal{L}_{\mathrm{MSE}} + \mathcal{L}_{\mathrm{LPIPS}} + \mathcal{L}_{\mathrm{GAN}}$ . For alignment loss, we use DINOv2 [33] as external visual features and apply alignment to the eighth layer of the SiT model. Empirically, we set the alignment loss coefficient to $\lambda_{\mathrm{REPA}_g} = 0.5$ for updating SiT and $\lambda_{\mathrm{REPA}_v} = 1.5$ for VAE. For optimization, we use AdamW [23, 29] with a constant learning rate of $1 \times 10^{-4}$ , and a global batch size of 256. During training, we apply gradient clipping and exponential moving average (EMA) to the generative model for stable optimization. All experiments are conducted on 8 NVIDIA H100 GPUs. + +Evaluation. For image generation evaluation, we strictly follow the ADM setup [7]. We report generation quality using Fréchet inception distance (gFID) [17], structural FID (sFID) [31], inception score (IS) [42], precision (Prec.) and recall (Rec.) [25], measured on 50K generated images. For sampling, we follow the approach in SiT [30] and REPA [54], using the SDE Euler-Maruyama sampler with 250 steps. 
In terms of VAE benchmark, we measure the reconstruction FID (rFID) on 50K images from the Im + +![](images/c0ccebd2a37cbf0f831d5a90307f3271837c5bd97b301ad920db8dddccafe7f4.jpg) + +![](images/591dcd624ebe552ccf98780d13bf06ce9ec7c6a6452497a963591a1120d0e0bc.jpg) + +![](images/98d5346a3f5d469ff855020e5e08533d39161900a58da1f5b6d2bc7be4a914b7.jpg) + +![](images/fd12988f16d053f4c42208367c590281d1f1ccceeec126036badaef31a99e14a.jpg) + +![](images/ffb775a5a42001303337822551611cf8813cb9ca3412dea2c0caff6206c92060.jpg) + +![](images/9b918e4e0af32f5170c59f334fc411b4925cc05d2d86c173ad1e1740e0c6ca05.jpg) + +![](images/732c2cd3f89282ebd0a31c3d19337f2d8bfc6478b1fdfeb81e104bd6b2f3b41d.jpg) + +![](images/2578ec0c923a1161d476acd025b22fe4449057186fb959695c72d26185dc88cf.jpg) + +![](images/b04f9900754a787b98e10d234ab8b15d8eca65c470b50220f75f889f9a11f468.jpg) + +![](images/938d6a0dae447cad9ea39e63107549156e33218b9ca6a3565b0607e1df7efb1c.jpg) + +![](images/70e94662b383073ac040caac999f97c380f6c9dac51ab4b893a0a6a3f105693c.jpg) + +![](images/be5f24596e485bced0e6830cd32cfd126187b4d5cb069e03094ce1e4cef97c4d.jpg) + +![](images/23a834c33b92caf6f54237aa86afb54c1667eec855ce1c31c92f32512830e58c.jpg) +Figure 5. Qualitative Results on Imagenet $256 \times 256$ using E2E-VAE and SiT-XL. We use a classifier-free guidance scale $\alpha_{\mathrm{cfg}} = 4.0$ . + +![](images/10a3b80800ca4cbbe8a8383df426c38aec1dc6cebe1d8c22ac60df810291d921.jpg) + +![](images/e6f4ef56876b31e928da107740a6f9255035f413280dccaa59cbef9c0972b9c6.jpg) + +![](images/d01256bacff19b6a3cc55c94487b9b303cf3b4501268a5a38f314e22bf356833.jpg) + +![](images/04d75970ad292add98f1113716832632b5e8e141ef16b0ddb372ed0b172873c5.jpg) + +![](images/3424b3de6794f2e2560bbb40c232b2c343909d6fe3029b8fca2169b1dde57471.jpg) + +
MethodTokenizerEpochsgFID↓sFID↓IS↑
Without End-to-End Tuning
MaskDiT [56]SD-VAE16005.6910.34177.9
DiT [34]14009.626.85121.5
SiT [30]14008.616.32131.7
FasterDiT [51]4007.915.45131.3
REPA [54]SD-VAE2019.406.0667.4
4011.106.0667.4
807.905.06122.6
8005.905.73157.8
With End-to-End Tuning (Ours)
REPA-ESD-VAE*2012.835.0488.8
407.174.39123.7
804.074.60161.8
+ +Table 1. REPA-E for Accelerated Generation Performance. End-to-End training with REPA-E achieves significantly better performance (lower gFID) while using fewer epochs. Notably, REPA-E with only 80 epochs surpasses vanilla REPA using $10 \times$ epochs. * indicates that VAE is updated during end-to-end training. All results are w/o classifier-free guidance on ImageNet 256 × 256. Additional system-level comparisons with classifier-free guidance and state-of-the-art results are provided in Tab. 9. + +ageNet [6] validation set at a resolution of $256 \times 256$ . + +# 4.2. Impact on Training Performance and Speed + +We first analyze the impact of end-to-end tuning using REPA-E (Sec. 3.2) for improving generation performance and speed when training latent-diffusion transformers. + +Quantitative Evaluation. We compare REPA-E against various latent diffusion model (LDM) baselines in Tab. 1. We evaluate models of similar sizes ( $\sim$ 675M parameters) + +
Diff. ModelgFID↓sFID↓IS↑Prec.↑Rec.↑
SiT-B (130M)49.57.0027.50.460.59
+REPA-E (Ours)34.86.3139.10.570.59
SiT-L (458M)24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
SiT-XL (675M)19.46.0667.40.640.61
+REPA-E (Ours)12.85.0488.80.710.58
+ +Table 2. Variation in Model-Scale. We find that REPA-E brings substantial performance improvements across all model-scales. All baselines are reported using vanilla-REPA [54] for training. + +on ImageNet $256 \times 256$ generation task. All results are reported without classifier-free guidance [18] using popular SiT-XL [30] model for training. We make two observations; 1) End-to-End tuning leads to faster training: consistently improving generation FID (gFID) from $19.40 \rightarrow 12.83$ (20 epochs), $11.10 \rightarrow 7.17$ (40 epochs), and $7.90 \rightarrow 4.07$ (80 epochs), even when comparing with REPA [54]. 2) End-to-End training leads to better final performance: REPA-E at 80 epochs surpasses FasterDiT [51] ( $gFID = 7.91$ ) trained for 400 epochs and even MaskDiT [56], DiT [34], and SiT [30] which are trained over 1400 epochs. For instance, REPA-E reaches an FID of 4.07 within 400K steps, significantly boosting final performance over even REPA which only reaches a final FID for 5.9 after 4M steps [54]. + +Qualitative Evaluation. We provide qualitative comparisons between REPA [54] and REPA-E in Fig. 4. We generate images from the same noise and label using checkpoints at $50\mathrm{K}$ , $100\mathrm{K}$ , and $400\mathrm{K}$ training iterations, respectively. As seen in Fig. 4, we observe that REPA-E demonstrates superior image generation quality compared to the + +![](images/70ad31221b2f68af8bce71e05adc8b7df7169655277a3c20d0ba2992aa8a31cc.jpg) +(a) PCA Visualization of Latent Space Structure [24] + +![](images/77c564390a6c95f2c38a5d95ea756c20cec3d4210e716525a358c5d26a1aea66.jpg) +(b) Impact of End-to-End Tuning for Automatically Improving Latent Space Structure +Figure 6. End-to-End Training Improves Latent Space Structure. (a) We observe that the latent space of pretrained VAEs can suffer either high noise components (e.g., SDXL-VAE, SD-VAE [40]), or, be over-smoothed and lack details (e.g., VA-VAE [50]). 
(b) The use of end-to-end tuning ( $\S 3.2$ ) automatically helps improve the latent space structure in a model-agnostic manner across different VAE architectures. For instance, similar to findings of concurrent work [44], we observe that SD-VAE suffers from high noise components in the latent space. Applying end-to-end training automatically helps adjust the latent space to reduce noise. In contrast, other VAEs such as recently proposed VA-VAE [50] suffer from an over-smoothed latent space. The use of end-to-end tuning with REPA-E automatically helps learn a more detailed latent-space structure to best support generation performance. + +
Target Repr.gFID↓sFID↓IS↑Prec.↑Rec.↑
I-JEPA-H [2]23.05.8160.30.620.60
+REPA-E (Ours)16.55.1873.60.680.60
CLIP-L [37]29.25.9846.40.590.61
+REPA-E (Ours)23.46.4457.10.620.60
DINOv2-B [33]24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
DINOv2-L [33]23.35.8959.90.610.60
+REPA-E (Ours)16.05.5977.70.680.58
+ +Table 3. Variation in Representation Encoder. REPA-E yields consistent performance improvements across different choices for the representation-encoder used for representation-alignment [54]. All baselines are reported using vanilla-REPA [54] for training. + +
AutoencodergFID↓sFID↓IS↑Prec.↑Rec.↑
SD-VAE [40]24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
IN-VAE (f16d32)22.75.4756.00.620.62
+REPA-E (Ours)12.75.5784.00.690.62
VA-VAE [50]12.86.4783.80.710.58
+REPA-E (Ours)11.15.3188.80.720.61
+ +REPA baseline, while also generating more structurally meaningful images during early stages of training process. + +# 4.3. Generalization and Scalability of REPA-E + +We next analyze the generalization of the proposed approach to variation in training settings including model-size, tokenizer architecture, representation encoder, alignment depth [54] etc. Unless otherwise specified, all analysis and ablations use SiT-L [30] as the generative model, + +Table 4. Variation in VAE Architecture. All baselines are reported using vanilla-REPA [54] for training. + +
Aln. DepthgFID↓sFID↓IS↑Prec.↑Rec.↑
6th layer23.05.7259.20.620.60
+REPA-E (Ours)16.46.6474.30.670.59
8th layer24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
10th layer23.75.9156.90.620.60
+REPA-E (Ours)16.25.2274.70.680.58
+ +Table 5. Variation in Alignment Depth. End-to-End tuning (REPA-E) gives consistent performance improvements over original REPA [54] across varying alignment-depths. + +
ComponentgFID↓sFID↓IS↑Prec.↑Rec.↑
w/o stopgrad444.1460.31.490.000.00
w/o batch-norm18.15.3272.40.670.59
w/o LGAN19.26.4768.20.640.58
REPA-E (Ours)16.35.6975.00.680.60
+ +Table 6. Ablation Study on Role of Different Components. + +SD-VAE as the VAE, and DINOv2-B [33] as the pretrained vision model for REPA loss [54]. Default REPA alignment-depth of 8 is used. We train each variant for 100K iterations and report results without classifier-free guidance [18]. All baseline numbers are reported using vanilla REPA and compared with end-to-end training using REPA-E. + +Impact of Model Size. Tab. 2 compares SiT-B, SiT-L, and SiT-XL to evaluate the effect of model size. We make two key observations. First, across all configurations, REPA-E consistently improves performance over the REPA baseline. Specifically, it reduces gFID from $49.5 \rightarrow 34.8$ for SiT-B, $24.1 \rightarrow 16.3$ for SiT-L, and $19.4 \rightarrow 12.8$ for SiT-XL, demonstrating the effectiveness. Second, surprisingly the percentage gains in gFID achieved with REPA-E (over REPA) improve with increasing model size. For in- + +
MethodgFID↓sFID↓IS↑Prec.↑Rec.↑
100K Iterations (20 Epochs)
REPA [54]19.406.0667.40.640.61
REPA-E (scratch)14.127.8783.50.700.59
REPA-E (VAE init.)12.835.0488.80.710.58
200K Iterations (40 Epochs)
REPA [54]11.105.05100.40.690.64
REPA-E (scratch)7.546.17120.40.740.61
REPA-E (VAE init.)7.174.39123.70.740.62
400K Iterations (80 Epochs)
REPA [54]7.905.06122.60.700.65
REPA-E (scratch)4.344.44154.30.750.63
REPA-E (VAE init.)4.074.60161.80.760.62
+ +Table 7. End-to-End Training from Scratch. We find that while initializing the VAE with pretrained weights (SD-VAE [40]) helps slightly improve performance, REPA-E can be used to train both VAE and LDM from scratch in an end-to-end manner; still achieving significantly superior performance over REPA which requires a separate stage for training VAE in addition to LDM training. + +stance, for SiT-B model REPA-E leads to a $29.6\%$ improvement in gFID over REPA. Surprisingly even more gains are achieved for bigger models improving gFID by $32.3\%$ and $34.0\%$ for SiT-L and SiT-XL models respectively. This trend highlights the scalability of REPA-E; larger models achieve better percentage gains over vanilla-REPA. + +Variation in Representation Encoder. We report results across different perception model encoders (CLIP-L, I-JEPA-H, DINOv2-B, and DINOv2-L) Tab. 3. We observe that REPA-E gives consistent performance improvements over REPA, across different choices of the perceptual encoder model. In particular, with DINOv2-B and DINOv2-L, REPA-E significantly reduces gFID from $24.1 \rightarrow 16.3$ and from $23.3 \rightarrow 16.0$ , respectively. + +Variation in VAE. Tab. 4 evaluates the impact of different VAEs on REPA-E performance. In particular, we report results using three different VAEs 1) SD-VAE [1], 2) VA-VAE [50] and 3) IN-VAE (a $16\times$ downsampling, 32-channel VAE trained on ImageNet [6] using official training code from [40]). Across all variations, REPA-E consistently improves performance over the REPA baseline. REPA-E reduces gFID from $24.1\rightarrow 16.3$ from $22.7\rightarrow 12.7$ and $12.8\rightarrow 11.1$ for SD-VAE, IN-VAE and VA-VAE, respectively. The results demonstrate that REPA-E robustly improves generative quality across diverse variations in architecture, pretraining dataset and training setting of the VAE. + +Variation in Alignment Depth. Tab. 
5 investigates the effect of applying the alignment loss at different layers of the diffusion model. We observe that REPA-E consistently enhances generation quality over the REPA baseline across variation in choice of alignment depth; with gFID improving from $23.0 \rightarrow 16.4$ (6th layer), $24.1 \rightarrow 16.3$ (8th layer), and $23.7 \rightarrow 16.2$ (10th layer). + +
VAEDiffusion modelREPAgFID-50K
SD-VAE [40]DiT-XL [34]19.82
VA-VAE [50]DiT-XL [34]6.74
E2E-VAE (Ours)DiT-XL [34]6.75
SD-VAE [40]SiT-XL [30]17.20
VA-VAE [50]SiT-XL [30]5.93
E2E-VAE (Ours)SiT-XL [30]5.26
SD-VAE [40]DiT-XL [34]12.29
VA-VAE [50]DiT-XL [34]4.71
E2E-VAE (Ours)DiT-XL [34]4.20
SD-VAE [40]SiT-XL [30]7.90
VA-VAE [50]SiT-XL [30]4.88
E2E-VAE (Ours)SiT-XL [30]3.46
+ +Table 8. Impact of End-to-End Tuning on VAE Performance. We find that once tuned using REPA-E, the finetuned VAEs can be used as a drop-in replacement for their original counterparts offering significantly accelerated generation performance. We fix all the VAEs and only train the diffusion models (with and w/o REPA). E2E-VAE is obtained from REPA-E fine-tuning (VA-VAE + SiT-XL). All results are reported at 80 epochs (400K iterations). + +Ablation on Design Components. We also perform ablation studies analyzing the importance of each component discussed in Sec. 3.2. Results are shown in Tab. 6. We observe that each component plays a key role in the final performance for REPA-E. In particular, we observe that the stop-grad operation on the diffusion loss helps prevent degradation of the latent-space structure. Similarly, the use of batch norm is useful adaptively normalizing the latent-statistics and helps improve the gFID from $18.09 \rightarrow 16.3$ . Similarly, the regularization losses play a key role in maintaining the reconstruction performance of the finetuned VAE, thereby improving the gFID from $19.07 \rightarrow 16.3$ . + +End-to-End Training from Scratch. We next analyze the impact of VAE initialization on end-to-end training. As shown in Tab. 7, we find that while initializing the VAE from pretrained weights helps slightly improve performance, REPA-E can be used to train both VAE and LDM from scratch still achieving superior performance over REPA, which technically requires a separate stage for VAE training in addition to LDM training. For instance, while REPA achieves a FID of 5.90 after 4M iterations, REPA-E while training entirely from scratch (for both VAE and LDM) achieves much faster and better generation FID of 4.34 within just 400K iterations. + +# 4.4. Impact of End-to-End Tuning on VAE + +We next analyze the impact of end-to-end tuning on the VAE. In particular, we first show that end-to-end tuning improves the latent-space structure (Fig. 
6). We next show that once tuned using REPA-E, the finetuned VAEs can be used as a drop-in replacement for their original counterparts offering significantly improved generation performance. + +End-to-End Training improves Latent Space Structure. Results are shown in Fig. 6. Following [24], we visu + +
TokenizerMethodTraining Epochs#paramsrFID↓Generation w/o CFGGeneration w/ CFG
gFID↓sFID↓IS↑Prec.↑Rec.↑gFID↓sFID↓IS↑Prec.↑Rec.↑
AutoRegressive (AR)
MaskGiTMaskGIT [4]555227M2.286.18-182.10.800.51-----
VQGANLlamaGen [45]3003.1B0.599.388.24112.90.690.672.185.97263.30.810.58
VQVAEVAR [46]3502.0B------1.80-365.40.830.57
LFQ tokenizersMagViT-v2 [52]1080307M1.503.65-200.5--1.78-319.4--
LDMMAR [27]800945M0.532.35-227.80.790.621.55-303.70.810.62
Latent Diffusion Models (LDM)
SD-VAE [40]MaskDiT [56]1600675M0.615.6910.34177.90.740.602.285.67276.60.800.61
DiT [34]1400675M9.626.85121.50.670.672.274.60278.20.830.57
SiT [30]1400675M8.616.32131.70.680.672.064.50270.30.820.59
FasterDiT [51]400675M7.915.45131.30.670.692.034.63264.00.810.60
MDT [12]1300675M6.235.23143.00.710.651.794.57283.00.810.61
MDTv2 [13]1080675M-----1.584.52314.70.790.65
Representation Alignment Methods
VA-VAE [50]LightningDiT [50]80675M0.284.29---------
800675M2.054.37207.70.770.661.254.15295.30.800.65
SD-VAEREPA [54]80675M0.617.905.06122.60.700.65-----
800675M5.845.79158.70.700.681.284.68305.70.790.64
E2E-VAE (Ours)REPA80675M0.283.464.17159.80.770.631.674.12266.30.800.63
800675M1.694.17219.30.770.671.124.09302.90.790.66
+ +Table 9. System-Level Performance on ImageNet $256 \times 256$ comparing our end-to-end tuned VAE (E2E-VAE) with other VAEs for traditional LDM training. Note that all representation alignment methods at 800 epochs are evaluated using a class-balanced sampling protocol, as detailed in App. C. We observe that in addition to improving VAE latent space structure (Fig. 6), end-to-end tuning significantly improves VAE downstream generation performance. Once tuned using REPA-E, the improved VAE can be used as drop-in replacement for their original counterparts for accelerated generation performance. Overall, our approach helps improve both LDM and VAE performance — achieving a new state-of-the-art FID of 1.12 and 0.28, respectively for LDM generation and VAE reconstruction performance. + +alize latent space structure using principal component analysis (PCA) that projects them to three channels colored by RGB. We consider three different VAEs: 1) SD-VAE [40], 2) IN-VAE (a $16\times$ downsampling, 32-channel VAE trained on ImageNet [6]). 3) VA-VAE from recent work from [50]. We observe that end-to-end tuning using REPA-E automatically improves the latent space structure of the original VAE. For instance, similar to findings of concurrent work [44], we observe that SD-VAE suffers from high noise components in the latent space. Applying end-to-end training automatically helps adjust the latent space to learn reduce noise. In contrast, other VAEs such as recently proposed VA-VAE [50] suffer from over-smother latent space. Application of E2E tuning automatically helps learn a more detailed latent-space to best support generation performance. + +End-to-End Training Improves VAE Performance. We next evaluate the impact of end-to-end tuning on downstream generation performance of the VAE. To this end, we first use end-to-end tuning for finetuning the recently proposed VA-VAE [50]. 
We then use the resulting end-to-end finetuned-VAE (named E2E-VAE), and compare its downstream generation performance with current state-of-the-art VAEs; including SDVAE [40] and VA-VAE [50]. To do this, we conduct traditional latent diffusion model training (w/o REPA-E), where only the generator network is updated while keeping the VAE frozen. Tab. 8 shows the comparison of VAE downstream generation across diverse train + +ing settings. We observe that end-to-end tuned VAEs consistently outperform their original counterparts for downstream generation tasks across variations in LDM architecture and training settings. Interestingly, we observe that a VAE tuned using SiT-XL yields performance improvements even when using a different LDM architecture such as DiT-XL; thereby demonstrating the robustness of our approach. + +# 5. Conclusion + +"Can we unlock VAE's for performing end-to-end training with latent diffusion transformers?" Directly backpropagating diffusion loss to the VAE is ineffective and even degrades final performance. We show that while diffusion loss is ineffective, end-to-end training can be unlocked using REPA loss. Our end-to-end training recipe (REPA-E), significantly improves latent-space structure, shows remarkable performance; speeding up diffusion model training by over $17 \times$ and $45 \times$ over REPA and vanilla training recipes. Overall, our approach achieves new state-of-the-art results with generation FID of 1.12 and 1.69 with and without use of classifier-free guidance. We hope that our work can help foster further research for enabling end-to-end training with latent diffusion transformers. + +# Acknowledgments + +We would like to extend our deepest appreciation to Zeyu Zhang, Qinyu Zhao, and Zhanhao Liang for insightful discussions. We would also like to thank all reviewers for their constructive feedback. 
This work was supported in part by the Australian Research Council under Discovery Project DP210102801 and Future Fellowship FT240100820. SX acknowledges support from the OpenPath AI Foundation, IITP grant funded by the Korean Government (MSIT) (No. RS-2024-00457882) and NSF Award IIS-2443404. + +# References + +[1] Stability AI. Improved autoencoders ... https://huggingface.co/stabilityyai/sd-vae-ft-mse, n.d. Accessed: April 11, 2025. 5, 8 +[2] Mahmoud Assran, Quentin Duval, Ishan Misra, Piotr Bojanowski, Pascal Vincent, Michael Rabbat, Yann LeCun, and Nicolas Ballas. Self-supervised learning from images with a joint-embedding predictive architecture. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15619–15629, 2023. 7 +[3] Dana H Ballard. Modular learning in neural networks. In Proceedings of the sixth National conference on Artificial intelligence-Volume 1, pages 279-284, 1987. 3 +[4] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 9 +[5] Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. arXiv preprint arXiv:2310.00426, 2023. 3 +[6] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 2, 5, 6, 8, 9, 13 +[7] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 
5 +[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 3 +[9] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12873-12883, 2021. 3 +[10] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik + +Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. 3, 13 +[11] Peng Gao, Le Zhuo, Ziyi Lin, Chris Liu, Junsong Chen, Ruoyi Du, Enze Xie, Xu Luo, Longtian Qiu, Yuhang Zhang, et al. Lumina-t2x: Transforming text into any modality, resolution, and duration via flow-based large diffusion transformers. arXiv preprint arXiv:2405.05945, 2024. 3 +[12] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Masked diffusion transformer is a strong image synthesizer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23164-23173, 2023. 9 +[13] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Mdtv2: Masked diffusion transformer is a strong image synthesizer. arXiv preprint arXiv:2303.14389, 2023. 9 +[14] Ross Girshick. Fast r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 1440-1448, 2015. 1 +[15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 580-587, 2014. 
1 +[16] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 3 +[17] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 5 +[18] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 6, 7 +[19] Minyoung Huh, Brian Cheung, Tongzhou Wang, and Phillip Isola. The platonic representation hypothesis. In International Conference on Machine Learning, 2024. 3 +[20] Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International conference on machine learning, pages 448-456. pmlr, 2015. 4, 5 +[21] Dongwon Kim, Ju He, Qihang Yu, Chenglin Yang, Xiaohui Shen, Suha Kwak, and Liang-Chieh Chen. Democratizing text-to-image masked generative models with compact text-aware one-dimensional tokens. arXiv preprint arXiv:2501.07730, 2025. 3 +[22] Diederik P Kingma. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. 1, 3 +[23] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5 +[24] Theodoros Kouzelis, Ioannis Kakogeorgiou, Spyros Gidaris, and Nikos Komodakis. Eq-vae: Equivalence regularized latent space for improved generative image modeling. arXiv preprint arXiv:2502.09509, 2025. 2, 3, 7, 8 + +[25] Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in neural information processing systems, 32, 2019. 5 +[26] Black Forest Labs. Flux. 
https://github.com/black-forest-labs/flux, 2024.3 +[27] Tianhong Li, Yonglong Tian, He Li, Mingyang Deng, and Kaiming He. Autoregressive image generation without vector quantization. Advances in Neural Information Processing Systems, 37:56424-56445, 2025. 9, 14 +[28] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 13 +[29] I Loshchilov. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 5 +[30] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable interpolant transformers. In European Conference on Computer Vision, pages 23-40. Springer, 2024. 2, 3, 5, 6, 7, 8, 9, 13 +[31] Charlie Nash, Jacob Menick, Sander Dieleman, and Peter Battaglia. Generating images with sparse representations. In International Conference on Machine Learning, pages 7958-7968. PMLR, 2021. 5 +[32] OpenAI. Sora. https://openai.com/sora, 2024.3 +[33] Maxime Oquab, Timothee Darct, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. Transactions on Machine Learning Research Journal, pages 1-31, 2024. 4, 5, 7 +[34] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 3, 6, 8, 9 +[35] Pablo Pernias, Dominic Rampas, Mats Leon Richter, Christopher Pal, and Marc Aubreville. Würstchen: An efficient architecture for large-scale text-to-image diffusion models. In The Twelfth International Conference on Learning Representations, 2023. 
3 +[36] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. SDXL: Improving latent diffusion models for high-resolution image synthesis. In The Twelfth International Conference on Learning Representations, 2024. 3 +[37] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 7 +[38] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. IEEE transactions on pattern analysis and machine intelligence, 39(6):1137-1149, 2016. 1 + +[39] Sucheng Ren, Qihang Yu, Ju He, Xiaohui Shen, Alan Yuille, and Liang-Chieh Chen. Beyond next-token: Next-x prediction for autoregressive visual generation. arXiv preprint arXiv:2502.20388, 2025. 14 +[40] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 1, 2, 3, 4, 5, 7, 8, 9, 13, 14 +[41] Leonid I. Rudin, Stanley Osher, and Emad Fatemi. Nonlinear total variation based noise removal algorithms. Physica D: Nonlinear Phenomena, 60(1):259-268, 1992. 13 +[42] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 5 +[43] Jaskirat Singh, Stephen Gould, and Liang Zheng. High-fidelity guided image synthesis with latent diffusion models. arXiv preprint arXiv:2211.17084, 2022. 3 +[44] Ivan Skorokhodov, Sharath Girish, Benran Hu, Willi Menapace, Yanyu Li, Rameen Abdal, Sergey Tulyakov, and Aliaksandr Siarohin. 
Improving the diffusability of autoencoders. arXiv preprint arXiv:2502.14831, 2025. 2, 7, 9 +[45] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 9 +[46] Keyu Tian, Yi Jiang, Zehuan Yuan, Bingyue Peng, and Liwei Wang. Visual autoregressive modeling: Scalable image generation via next-scale prediction. Advances in neural information processing systems, 37:84839-84865, 2025. 9 +[47] Arash Vahdat, Karsten Kreis, and Jan Kautz. Score-based generative modeling in latent space. In Advances in Neural Information Processing Systems, pages 11287-11302. Curran Associates, Inc., 2021. 3, 13 +[48] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 3 +[49] Shuai Wang, Zhi Tian, Weilin Huang, and Limin Wang. Ddt: Decoupled diffusion transformer. arXiv preprint arXiv:2504.05741, 2025. 14 +[50] Jingfeng Yao and Xinggang Wang. Reconstruction vs. generation: Taming optimization dilemma in latent diffusion models. arXiv preprint arXiv:2501.01423, 2025. 3, 7, 8, 9, 14 +[51] Jingfeng Yao, Wang Cheng, Wenyu Liu, and Xinggang Wang. Fasteredit: Towards faster diffusion transformers training without architecture modification. arXiv preprint arXiv:2410.10356, 2024. 6, 9 +[52] Lijun Yu, José Lezama, Nitesh B Gundavarapu, Luca Versari, Kihyuk Sohn, David Minnen, Yong Cheng, Vighnesh Birodkar, Agrim Gupta, Xiuye Gu, et al. Language model beats diffusion-tokenizer is key to visual generation. arXiv preprint arXiv:2310.05737, 2023. 9 +[53] Qihang Yu, Mark Weber, Xueqing Deng, Xiaohui Shen, Daniel Cremers, and Liang-Chieh Chen. An image is worth 32 tokens for reconstruction and generation. Advances in Neural Information Processing Systems, 37:128940-128966, 2025. 
3 + +[54] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. arXiv preprint arXiv:2410.06940, 2024. 2, 3, 4, 5, 6, 7, 8, 9, 13 +[55] Kaiwen Zha, Lijun Yu, Alireza Fathi, David A Ross, Cordelia Schmid, Dina Katabi, and Xiuye Gu. Language-guided image tokenization for generation. arXiv preprint arXiv:2412.05796, 2024. 3 +[56] Hongkai Zheng, Weili Nie, Arash Vahdat, and Anima Anandkumar. Fast training of diffusion models with masked transformers. arXiv preprint arXiv:2306.09305, 2023. 6, 9 + +# REPA-E: Unlocking VAE for End-to-End Tuning with Latent Diffusion Transformers + +Supplementary Material + +
Training StrategySpatial VarianceTotal Variation
w/o E2E Tuning17.066627.35
E2E w/ REPA Loss18.025516.14
E2E w/ Diff. Loss0.0289.80
+ +Table 10. Impact of Naive End-to-End Training with Diffusion Loss. We report total variation [41] and mean variance along each VAE latent channel for three training settings: 1) Standard LDM training (w/o end-to-end (E2E) tuning), 2) Naive E2E tuning with Diffusion loss, 3) E2E tuning with REPA loss [54]. All experiments use SDVAE for VAE initialization. We observe that using diffusion loss for end-to-end tuning encourages learning a simpler latent space with lower variance along the spatial dimensions (Fig. 3a). The simpler latent space is easier for denoising objective ( $\S 3.1$ ), but degrades final generation performance (Fig. 1). All results are reported at 400K iterations with SiT-XL/2 [30] as LDM. + +# A. Impact of Diffusion Loss on Latent Space + +We analyze the effect of naively using diffusion loss for end-to-end tuning, focusing on how it alters the latent space structure. All experiments here use SD-VAE for tokenizer initialization and SiT-XL/2 [30] as the latent diffusion model, trained for 400K iterations without classifier-free guidance. We report two metrics to quantify latent structure, 1) Spatial Variance, computed as the mean per-channel variance across spatial dimensions, and 2) Total Variation [41], which captures local spatial differences in the latent map. + +As shown in Tab. 10 and Fig. 3, directly backpropagating the diffusion loss leads to reduced spatial variance, which creates an easier denoising problem by hacking the latent space but leads to reduced image generation performance. In contrast, end-to-end training with REPA-E not only leads to improved generation performance but also improves the latent space structure for the underlying VAE (Fig. 3, 6). + +# B. Additional Analysis + +
MethodgFID ↓sFID ↓IS ↑Prec. ↑Rec. ↑
REPA + E2E-Diffusion444.1460.31.490.000.00
REPA + E2E-LSGM9.895.07107.50.720.61
REPA-E (Ours)4.074.60161.80.760.62
+ +Table 11. Comparison with LSGM Objective. REPA-E shows better generation performance and convergence speed. + +Comparison of End-to-End Training Objectives. We provide additional results comparing different objectives for + +
MethodgFID↓sFID↓IS↑Prec.↑Rec.↑
REPA + SiT-L22.25.6858.30.740.60
REPA-E + SiT-L12.84.6090.60.790.61
+ +Table 12. Scaling REPA-E to Higher Resolution. System-level results on ImageNet-512 with $64 \times 64$ latents using SiT-L at 100K steps without classifier-free guidance. We observe that REPA-E leads to significant performance improvements over vanilla-REPA [54] even at high resolutions. + +
SamplerODE, NFE=50SDE, NFE=250
gFIDVA-VAE 5.43E2E-VAE 5.02VA-VAE 5.57E2E-VAE 4.97
+ +Table 13. Generalization to T2I Tasks. FID results on MSCOCO text-to-image generation using MMDiT + REPA. We find that end-to-end tuned VAEs (E2E-VAE) also generalizes to T2I tasks showing improved generation performance. + +end-to-end training of VAE and LDM. Specifically, we evaluate: 1) naive E2E training by backpropagating diffusion loss to VAE encoder, 2) the LSGM entropy-regularized objective [47], 3) our proposed REPA-E. All methods are trained with SiT-XL for 400K steps under consistent settings. + +The LSGM objective prevents feature collapse by maximizing entropy of the latent space. However, as shown in Tab. 11, our REPA-E formulation yields better performance across all metrics at just $400\mathrm{K}$ steps, with significantly faster convergence and stronger generation quality. + +Scaling REPA-E to Higher Latent Resolution. We conduct experiments on ImageNet-512 [6] to evaluate the performance of REPA-E under higher-resolution latent settings $(64 \times 64)$ . We use SD-VAE [40] as the tokenizer and SiT-L as the diffusion model, trained for 100K steps and we report the performance without classifier-free guidance. As shown in Tab. 12, our approach yields significant improvements in generation quality compared to REPA. + +MSCOCO Text-to-Image Generation with E2E-VAE. To further evaluate the utility of the tuned VAE beyond ImageNet, we assess its performance in a text-to-image generation (T2I) setting on MSCOCO [28]. Following REPA [54], we adopt MMDiT [10] as the diffusion backbone and apply REPA loss across all variants. All models are trained for 100K steps and evaluated using classifier-free guidance with $\alpha_{\mathrm{cfg}} = 2.0$ and EMA weights during inference. We report generation FID, and observe that replacing VA-VAE with our E2E-VAE consistently improves downstream text-to-image generation quality (Tab. 13). + +
AutoencoderPSNR↑SSIM↑LPIPS↓rFID↓
SD-VAE [40]25.670.720.130.74
+REPA-E (Ours)24.840.710.150.53
IN-VAE (f16d32)27.400.800.090.26
+REPA-E (Ours)26.870.780.110.27
VA-VAE [50]26.320.760.110.28
+REPA-E (Ours)26.250.750.110.28
+ +Table 14. VAE Reconstruction Evaluation on ImageNet-256. While REPA-E primarily improves the generative capability of the VAE (see Tab. 9), it also maintains competitive reconstruction quality across all metrics. + +# C. Remarks on FID Evaluation + +Throughout the paper, we follow the standard ImageNet conditional evaluation protocol, where 50,000 images are generated by randomly sampling class labels. Recent papers [27, 39, 49] have adopted class-balanced generation for evaluation, where 50 images per class are generated across the 1,000 categories. To our surprise, we found that using class-balanced sampling yields slightly better FID performance. Therefore, for the results in Tab. 9, we adopt this class-balanced sampling strategy. Accordingly, all representation alignment methods at the 800-epoch checkpoint in this table are computed under the balanced sampling protocol to ensure a fair and consistent comparison. \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10483/images/026a2509afc429c08ebf20dbfc03dde3f7f9882b223f893a1155f70e028d28db.jpg b/data/2025/2504_10xxx/2504.10483/images/026a2509afc429c08ebf20dbfc03dde3f7f9882b223f893a1155f70e028d28db.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3b4ab14ddbb5f61f45043e329902a6dd9955a5f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/026a2509afc429c08ebf20dbfc03dde3f7f9882b223f893a1155f70e028d28db.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a41c04759e4c37d644666b31f7d308c1aadb553e9bd45ab6bcf7d6a1dade19d5 +size 4292 diff --git a/data/2025/2504_10xxx/2504.10483/images/04d75970ad292add98f1113716832632b5e8e141ef16b0ddb372ed0b172873c5.jpg b/data/2025/2504_10xxx/2504.10483/images/04d75970ad292add98f1113716832632b5e8e141ef16b0ddb372ed0b172873c5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..880f4846bef29fdaa5e580d9fb0dc9a39a6e5883 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10483/images/04d75970ad292add98f1113716832632b5e8e141ef16b0ddb372ed0b172873c5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d913073bab1dd31d4cb8a9135850ef09fb62421799d2b5225dadd603f138eb27 +size 13512 diff --git a/data/2025/2504_10xxx/2504.10483/images/10a3b80800ca4cbbe8a8383df426c38aec1dc6cebe1d8c22ac60df810291d921.jpg b/data/2025/2504_10xxx/2504.10483/images/10a3b80800ca4cbbe8a8383df426c38aec1dc6cebe1d8c22ac60df810291d921.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0da7bca4ce8db1b130ea6a643f382f2ffb3e081b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/10a3b80800ca4cbbe8a8383df426c38aec1dc6cebe1d8c22ac60df810291d921.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0db2c0ba328ad4643259b781ea05349ca198be60f7e727942da84db60bfcd25 +size 11133 diff --git a/data/2025/2504_10xxx/2504.10483/images/2062b30ebd77b104482e0ace1af3c36cfdc092c6d22e5d9f827fdc42a5cdf62f.jpg b/data/2025/2504_10xxx/2504.10483/images/2062b30ebd77b104482e0ace1af3c36cfdc092c6d22e5d9f827fdc42a5cdf62f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c3c9177d33b22732ca13d689060b78195b7c700b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/2062b30ebd77b104482e0ace1af3c36cfdc092c6d22e5d9f827fdc42a5cdf62f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4108fd877fbdc233e456ed870069c49c08d8470bc1bb8ccce956ef80187808d2 +size 1938 diff --git a/data/2025/2504_10xxx/2504.10483/images/23a834c33b92caf6f54237aa86afb54c1667eec855ce1c31c92f32512830e58c.jpg b/data/2025/2504_10xxx/2504.10483/images/23a834c33b92caf6f54237aa86afb54c1667eec855ce1c31c92f32512830e58c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e824e231129228676892386aa6380262fd13d65 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/23a834c33b92caf6f54237aa86afb54c1667eec855ce1c31c92f32512830e58c.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:c512c734d979f3e1593a49a88284ea7811159ad86d37d33e69fcf992377a8679 +size 11233 diff --git a/data/2025/2504_10xxx/2504.10483/images/2578ec0c923a1161d476acd025b22fe4449057186fb959695c72d26185dc88cf.jpg b/data/2025/2504_10xxx/2504.10483/images/2578ec0c923a1161d476acd025b22fe4449057186fb959695c72d26185dc88cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..79259b35a69b30fc96c15a5bdfe2d3ed94c4b6da --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/2578ec0c923a1161d476acd025b22fe4449057186fb959695c72d26185dc88cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0805753f349c878489c0987f3131572994a1a290c63e30f89a226aa6f9acfd77 +size 8466 diff --git a/data/2025/2504_10xxx/2504.10483/images/32779fa7abeff32d8b30751ed86e723fd51a7418a4339d051f5089a2ad573a77.jpg b/data/2025/2504_10xxx/2504.10483/images/32779fa7abeff32d8b30751ed86e723fd51a7418a4339d051f5089a2ad573a77.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7c73999cf1c8a20158d5475adc345da95c2d7358 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/32779fa7abeff32d8b30751ed86e723fd51a7418a4339d051f5089a2ad573a77.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d709a5b7cb5aa403f2aef86f44e56c3903a7a990651db6639534afe156185fd +size 6663 diff --git a/data/2025/2504_10xxx/2504.10483/images/3424b3de6794f2e2560bbb40c232b2c343909d6fe3029b8fca2169b1dde57471.jpg b/data/2025/2504_10xxx/2504.10483/images/3424b3de6794f2e2560bbb40c232b2c343909d6fe3029b8fca2169b1dde57471.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a35b8c8914468ec97235894865c7eaed3e05e4f0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/3424b3de6794f2e2560bbb40c232b2c343909d6fe3029b8fca2169b1dde57471.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a0927e4bc9fffb6131d9b95c8e91cd0d785befe4755e434a5aed2d8b7b84d75 +size 6996 diff --git 
a/data/2025/2504_10xxx/2504.10483/images/3a884ef9a7201faa6ce809763a9801e6306cc17f1db558b545d4255c6db7f8fb.jpg b/data/2025/2504_10xxx/2504.10483/images/3a884ef9a7201faa6ce809763a9801e6306cc17f1db558b545d4255c6db7f8fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c294969b173eb13ceeeddec6fbaff03f7a00e35f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/3a884ef9a7201faa6ce809763a9801e6306cc17f1db558b545d4255c6db7f8fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4d198d71bc95f14d83a6f6c3c76dc88d6a8b15559b92e114d806b76e51a1cae +size 4430 diff --git a/data/2025/2504_10xxx/2504.10483/images/3af1810a7f98b4e4da924a364be17b210eb220e9b0b6c4f9bb76ce63a6aa8af7.jpg b/data/2025/2504_10xxx/2504.10483/images/3af1810a7f98b4e4da924a364be17b210eb220e9b0b6c4f9bb76ce63a6aa8af7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef94c14e5e460eff037a7eeb11a48998438e8c84 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/3af1810a7f98b4e4da924a364be17b210eb220e9b0b6c4f9bb76ce63a6aa8af7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36713c7eadb1813bbed9ffcf3f5efb786503af942752d43bbf5ad7537a0e2c30 +size 21639 diff --git a/data/2025/2504_10xxx/2504.10483/images/3b2ef12788c4a2120146d9f5d497dc212500338f2b6db3d99890fedb7710f1b6.jpg b/data/2025/2504_10xxx/2504.10483/images/3b2ef12788c4a2120146d9f5d497dc212500338f2b6db3d99890fedb7710f1b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..de71e467d0a02da4c8f39bc69331788925cf74b9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/3b2ef12788c4a2120146d9f5d497dc212500338f2b6db3d99890fedb7710f1b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75d519e95a1ad99b518a9c37c66db4661ba05097d57aa933744ab9ad026197d4 +size 3382 diff --git a/data/2025/2504_10xxx/2504.10483/images/3bd08f1e01a8a2b5250db10979a050e6a8557330b32383b5d72fc672ca983201.jpg 
b/data/2025/2504_10xxx/2504.10483/images/3bd08f1e01a8a2b5250db10979a050e6a8557330b32383b5d72fc672ca983201.jpg new file mode 100644 index 0000000000000000000000000000000000000000..14a5470b8542b98e37ae3b2c3bfc803e7fb6e81f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/3bd08f1e01a8a2b5250db10979a050e6a8557330b32383b5d72fc672ca983201.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8b89f6eb5599413e47de8b26c767045be2c4c9d3a4429f8d7b191f5d26bc2c5 +size 158557 diff --git a/data/2025/2504_10xxx/2504.10483/images/3d0d5d48be88986c8ac48ee99088103e4823610d4c41d445054fcca80dcf74c9.jpg b/data/2025/2504_10xxx/2504.10483/images/3d0d5d48be88986c8ac48ee99088103e4823610d4c41d445054fcca80dcf74c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f2ef283c35f92381410c560fe4f4d96012d85fc5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/3d0d5d48be88986c8ac48ee99088103e4823610d4c41d445054fcca80dcf74c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8a7fcbe3f6aa3d0398621a2b734d7b0681558f7913fde637f4eb0f5800ed19a +size 31089 diff --git a/data/2025/2504_10xxx/2504.10483/images/3e1b51168691e30ece2a426a5f9344b7b39df1ab9f2ca7cae0527e56816b6325.jpg b/data/2025/2504_10xxx/2504.10483/images/3e1b51168691e30ece2a426a5f9344b7b39df1ab9f2ca7cae0527e56816b6325.jpg new file mode 100644 index 0000000000000000000000000000000000000000..80decca2941860acf26c029252206b52b6ab3b61 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/3e1b51168691e30ece2a426a5f9344b7b39df1ab9f2ca7cae0527e56816b6325.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88443a975ec97de57c1ff7ca10b4cee19131dc567733c4c39d54770b4ec2b6a6 +size 56356 diff --git a/data/2025/2504_10xxx/2504.10483/images/4084e4f8a694c979c3b8b8e5e18a0d0a6961457d400ae16e873a14a36c04d873.jpg b/data/2025/2504_10xxx/2504.10483/images/4084e4f8a694c979c3b8b8e5e18a0d0a6961457d400ae16e873a14a36c04d873.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..d258fdf6c765557b6d460192bcbfacc98adecddb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/4084e4f8a694c979c3b8b8e5e18a0d0a6961457d400ae16e873a14a36c04d873.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d00403ee5b4476afa5beea5e8433ffa3bfd610f38708c8970aa1d05086a7f97a +size 17337 diff --git a/data/2025/2504_10xxx/2504.10483/images/4a1821b1fab0a4d9974dccd6b9994ef489d78001f259fa1cdc2878fe9134dcfe.jpg b/data/2025/2504_10xxx/2504.10483/images/4a1821b1fab0a4d9974dccd6b9994ef489d78001f259fa1cdc2878fe9134dcfe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d16af8eff963e5001e5b2d1ca803f55adc7f3b52 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/4a1821b1fab0a4d9974dccd6b9994ef489d78001f259fa1cdc2878fe9134dcfe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68a269bea4b4ffe34d8cefcc09c0ceb0934e6d529af1bf66478876e872f6ecf3 +size 36495 diff --git a/data/2025/2504_10xxx/2504.10483/images/4a22383a153c124e4af46b489760f62ae9e0676c1257c25773a561a405c1f621.jpg b/data/2025/2504_10xxx/2504.10483/images/4a22383a153c124e4af46b489760f62ae9e0676c1257c25773a561a405c1f621.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e45c6b4e6d12e2cf2247f59492b4ce931c6dd956 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/4a22383a153c124e4af46b489760f62ae9e0676c1257c25773a561a405c1f621.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b634d4f85be2adbe02d90cdaa88e9e9e33f9a51aee3858151d96865811d44457 +size 47014 diff --git a/data/2025/2504_10xxx/2504.10483/images/5236958fea9f022969d3155f5426594fa733d4d77a23e4336c7f34abd68a2ea8.jpg b/data/2025/2504_10xxx/2504.10483/images/5236958fea9f022969d3155f5426594fa733d4d77a23e4336c7f34abd68a2ea8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c8ae23e9b0c122f9b29c4b7e1a80d1ab87985e23 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10483/images/5236958fea9f022969d3155f5426594fa733d4d77a23e4336c7f34abd68a2ea8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6439c8648dce283e150e2e5dde5be91427f82b3542ecaacf8fc63ae765192eb +size 25103 diff --git a/data/2025/2504_10xxx/2504.10483/images/58248ecea17233f3151ed2c3fa2b6660f014b9cc50ef2d343f2fafeecf08c27b.jpg b/data/2025/2504_10xxx/2504.10483/images/58248ecea17233f3151ed2c3fa2b6660f014b9cc50ef2d343f2fafeecf08c27b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc03d66dc0c6f1462bd0906cdb5664a8619412ee --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/58248ecea17233f3151ed2c3fa2b6660f014b9cc50ef2d343f2fafeecf08c27b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:939173855ef184acdc67a7308e8af0f84115f857deb2d969be6db99ccfa99a06 +size 49413 diff --git a/data/2025/2504_10xxx/2504.10483/images/591dcd624ebe552ccf98780d13bf06ce9ec7c6a6452497a963591a1120d0e0bc.jpg b/data/2025/2504_10xxx/2504.10483/images/591dcd624ebe552ccf98780d13bf06ce9ec7c6a6452497a963591a1120d0e0bc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6c57be277f9f247516fd65870eab87a0f2e11882 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/591dcd624ebe552ccf98780d13bf06ce9ec7c6a6452497a963591a1120d0e0bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d5bc74b1630780473f64b9c33c281f2578d73c7ac9cca4d2db0ad28cf429799 +size 6829 diff --git a/data/2025/2504_10xxx/2504.10483/images/5afd84a905078fcc6a268b92ea0c21c222abc451154a92f902aa8edef8767ab1.jpg b/data/2025/2504_10xxx/2504.10483/images/5afd84a905078fcc6a268b92ea0c21c222abc451154a92f902aa8edef8767ab1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e2b7d39b6c3b31e20ef7563bd8279766649c81b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/5afd84a905078fcc6a268b92ea0c21c222abc451154a92f902aa8edef8767ab1.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:3f96ad75fffd3122715f47d0970f90a518d880c19e75c3d85b9bcb6dbec7a22a +size 62521 diff --git a/data/2025/2504_10xxx/2504.10483/images/678247a76edebf75eef996545bf9f71dcd7acf3febf0a69f2a6874afe513f021.jpg b/data/2025/2504_10xxx/2504.10483/images/678247a76edebf75eef996545bf9f71dcd7acf3febf0a69f2a6874afe513f021.jpg new file mode 100644 index 0000000000000000000000000000000000000000..819f8545bc19088090c129855b8bb75c98a283d1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/678247a76edebf75eef996545bf9f71dcd7acf3febf0a69f2a6874afe513f021.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2edf367c97779d561b733fd947045bd504824560e8ca7d1631fb0e1c5a9e7b50 +size 3115 diff --git a/data/2025/2504_10xxx/2504.10483/images/6d94cb8882bfecafaab1bbb1acfc06fe066cc5249188812276c3541185a1e8ca.jpg b/data/2025/2504_10xxx/2504.10483/images/6d94cb8882bfecafaab1bbb1acfc06fe066cc5249188812276c3541185a1e8ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df59fd19b6f7a4746f3c6a985c46f56b450f4aeb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/6d94cb8882bfecafaab1bbb1acfc06fe066cc5249188812276c3541185a1e8ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdd3cbe4abad3e93440d0e908e4027fbbcaebde66e7cfbb925dcee7d32cc7958 +size 3033 diff --git a/data/2025/2504_10xxx/2504.10483/images/6e3d867ad461854a2b4897fe73b4d029c7a1b0fe01e5b82ec27c5b3c3fbac347.jpg b/data/2025/2504_10xxx/2504.10483/images/6e3d867ad461854a2b4897fe73b4d029c7a1b0fe01e5b82ec27c5b3c3fbac347.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bc975793f26470a8ef6380a7c106a30cde2b9d0b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/6e3d867ad461854a2b4897fe73b4d029c7a1b0fe01e5b82ec27c5b3c3fbac347.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db73e716115b84913c806f70149c446b285f8d1fd1cdef4ae086cc7f044ce430 +size 10227 diff --git 
a/data/2025/2504_10xxx/2504.10483/images/6ff1982f4ad41e9b7eb323cd44e02190df89a9a8943279c88efeddf464d1cd62.jpg b/data/2025/2504_10xxx/2504.10483/images/6ff1982f4ad41e9b7eb323cd44e02190df89a9a8943279c88efeddf464d1cd62.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1a2ba5576b1db005b61c69e2c7a3fbf665ae4e03 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/6ff1982f4ad41e9b7eb323cd44e02190df89a9a8943279c88efeddf464d1cd62.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:177af2204c65354adaf2a6e8ae81c6910eee5a71b070b14d7b5961ed8eb6c146 +size 18444 diff --git a/data/2025/2504_10xxx/2504.10483/images/70ad31221b2f68af8bce71e05adc8b7df7169655277a3c20d0ba2992aa8a31cc.jpg b/data/2025/2504_10xxx/2504.10483/images/70ad31221b2f68af8bce71e05adc8b7df7169655277a3c20d0ba2992aa8a31cc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b7df7f8c4472507805cad43d58f775584baba74 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/70ad31221b2f68af8bce71e05adc8b7df7169655277a3c20d0ba2992aa8a31cc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c2db7afd042862f510e1020e8d3e2d1439a5dc088a9922da88ef337cd6bc5b1 +size 51211 diff --git a/data/2025/2504_10xxx/2504.10483/images/70e94662b383073ac040caac999f97c380f6c9dac51ab4b893a0a6a3f105693c.jpg b/data/2025/2504_10xxx/2504.10483/images/70e94662b383073ac040caac999f97c380f6c9dac51ab4b893a0a6a3f105693c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..36abb8de70a3eff921daf8de4e89bf0ccf5b92a8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/70e94662b383073ac040caac999f97c380f6c9dac51ab4b893a0a6a3f105693c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c13d8a1cb917b34466a672d41204cea43905f1560b1111d7368f233c2412da5a +size 7279 diff --git a/data/2025/2504_10xxx/2504.10483/images/732c2cd3f89282ebd0a31c3d19337f2d8bfc6478b1fdfeb81e104bd6b2f3b41d.jpg 
b/data/2025/2504_10xxx/2504.10483/images/732c2cd3f89282ebd0a31c3d19337f2d8bfc6478b1fdfeb81e104bd6b2f3b41d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..475761aa8f89aaa86fc4f30df80f9192f8cba4fa --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/732c2cd3f89282ebd0a31c3d19337f2d8bfc6478b1fdfeb81e104bd6b2f3b41d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54e72ef1c461f6ea3b8476dd85f50368d2a3959718a21eb44343906a796fab8a +size 8997 diff --git a/data/2025/2504_10xxx/2504.10483/images/737ee93bcfc4726d031fc27ae03451e0ae5dc24d68c85911901ac5255cefdf3a.jpg b/data/2025/2504_10xxx/2504.10483/images/737ee93bcfc4726d031fc27ae03451e0ae5dc24d68c85911901ac5255cefdf3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b557a5c822a8831ae062201c8e11eaa058959f4c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/737ee93bcfc4726d031fc27ae03451e0ae5dc24d68c85911901ac5255cefdf3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:898ea2c2fe38582d65971de75070f7311646496a32c66d28554132bf39029b9e +size 2865 diff --git a/data/2025/2504_10xxx/2504.10483/images/77c564390a6c95f2c38a5d95ea756c20cec3d4210e716525a358c5d26a1aea66.jpg b/data/2025/2504_10xxx/2504.10483/images/77c564390a6c95f2c38a5d95ea756c20cec3d4210e716525a358c5d26a1aea66.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ecbce7d84075f29bf493b1a4a68e2dcbdd34f661 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/77c564390a6c95f2c38a5d95ea756c20cec3d4210e716525a358c5d26a1aea66.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8291d8954ca802f3c5859119f3072331f5b07d7d1104ddc13084895da0221ec9 +size 66499 diff --git a/data/2025/2504_10xxx/2504.10483/images/84102d205259392e556ce48c91dd5529c425e9dd4d94c96963ebdc7eb5e385a1.jpg b/data/2025/2504_10xxx/2504.10483/images/84102d205259392e556ce48c91dd5529c425e9dd4d94c96963ebdc7eb5e385a1.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..9457d2a0d0ef6e7dfa481376ca0049c311e0c6f5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/84102d205259392e556ce48c91dd5529c425e9dd4d94c96963ebdc7eb5e385a1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e724db9abd80456249a0717e6701c849658410147ea650d2fd7e046cbcbf867 +size 25235 diff --git a/data/2025/2504_10xxx/2504.10483/images/938d6a0dae447cad9ea39e63107549156e33218b9ca6a3565b0607e1df7efb1c.jpg b/data/2025/2504_10xxx/2504.10483/images/938d6a0dae447cad9ea39e63107549156e33218b9ca6a3565b0607e1df7efb1c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e9087fc12120a9033b0166fa73b6048b82311eab --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/938d6a0dae447cad9ea39e63107549156e33218b9ca6a3565b0607e1df7efb1c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81ee06cef830f3e5b900bd01201cce0b6761d074516b38b8bfb1f50a97ceea1f +size 8657 diff --git a/data/2025/2504_10xxx/2504.10483/images/96122cd52837ce36699d9fc4d592f85bbb3c75af36fa0f639b4dd5f6896b3775.jpg b/data/2025/2504_10xxx/2504.10483/images/96122cd52837ce36699d9fc4d592f85bbb3c75af36fa0f639b4dd5f6896b3775.jpg new file mode 100644 index 0000000000000000000000000000000000000000..13ab4c08a90a95f281b6c5c15d785bfbd62b3c47 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/96122cd52837ce36699d9fc4d592f85bbb3c75af36fa0f639b4dd5f6896b3775.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb3d4008f78d2d7813f1b0dfea61a449232a0bfc76c71786b3956e48f3c80899 +size 1998 diff --git a/data/2025/2504_10xxx/2504.10483/images/98d5346a3f5d469ff855020e5e08533d39161900a58da1f5b6d2bc7be4a914b7.jpg b/data/2025/2504_10xxx/2504.10483/images/98d5346a3f5d469ff855020e5e08533d39161900a58da1f5b6d2bc7be4a914b7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d7464caeb0d7cabfafd97efbfc36eb05c5248b43 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10483/images/98d5346a3f5d469ff855020e5e08533d39161900a58da1f5b6d2bc7be4a914b7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05455d9d4b5b4ed49c781079b09f991ad394f976527c69445ee2ca941d41d55a +size 9687 diff --git a/data/2025/2504_10xxx/2504.10483/images/9a812b0e81a6130477702ee4c4a9eb833570d2120becb8138fb64bcbdeb3bbb7.jpg b/data/2025/2504_10xxx/2504.10483/images/9a812b0e81a6130477702ee4c4a9eb833570d2120becb8138fb64bcbdeb3bbb7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..361329b62abba93a5ded79c958c9e94b20ed3410 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/9a812b0e81a6130477702ee4c4a9eb833570d2120becb8138fb64bcbdeb3bbb7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e43fbb559c2e5b3aab7a5654cbf2b043ec1f638d5fb93adc238ab73ecea90bc5 +size 36729 diff --git a/data/2025/2504_10xxx/2504.10483/images/9b918e4e0af32f5170c59f334fc411b4925cc05d2d86c173ad1e1740e0c6ca05.jpg b/data/2025/2504_10xxx/2504.10483/images/9b918e4e0af32f5170c59f334fc411b4925cc05d2d86c173ad1e1740e0c6ca05.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8342738dbb884283d099a7229c3e5426a077353f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/9b918e4e0af32f5170c59f334fc411b4925cc05d2d86c173ad1e1740e0c6ca05.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4ae711ec1991af4d7bfee6322793a19a7e982403fd21187801224300d30b285 +size 12558 diff --git a/data/2025/2504_10xxx/2504.10483/images/9c9161a3c487fa16b6c142d4839b37e74004fe3be79b441c86019cad3af5488a.jpg b/data/2025/2504_10xxx/2504.10483/images/9c9161a3c487fa16b6c142d4839b37e74004fe3be79b441c86019cad3af5488a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b9cc644b7df7c21dac188ee5a3a8a20a8dc6fc10 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/9c9161a3c487fa16b6c142d4839b37e74004fe3be79b441c86019cad3af5488a.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:1fc68baf037cc404b0db71f673682a6afe1980120fac4a8d799208444d2be329 +size 17179 diff --git a/data/2025/2504_10xxx/2504.10483/images/a9541f9988db720abb731ca660a0d104cdb172ec8d219145d7c412f88112e0a4.jpg b/data/2025/2504_10xxx/2504.10483/images/a9541f9988db720abb731ca660a0d104cdb172ec8d219145d7c412f88112e0a4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fef7f900dc4b5163f6a41ef398242f327cc5acff --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/a9541f9988db720abb731ca660a0d104cdb172ec8d219145d7c412f88112e0a4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70d8759a5b8f93df97d954c32c16f8c25f6e4461b8ca57e9bd56b0393bda614f +size 25351 diff --git a/data/2025/2504_10xxx/2504.10483/images/adaae38fc3e6ec577cf064cdecf30feb67e4483e73d3134e2edd66601a68e38b.jpg b/data/2025/2504_10xxx/2504.10483/images/adaae38fc3e6ec577cf064cdecf30feb67e4483e73d3134e2edd66601a68e38b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e1a3629d25ae2564266737daa295c9dc392f358d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/adaae38fc3e6ec577cf064cdecf30feb67e4483e73d3134e2edd66601a68e38b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfd5125e4a419a95b236cadad68520bece6ce7ce0e4cc93ce6fce6b8aab1a9c9 +size 36428 diff --git a/data/2025/2504_10xxx/2504.10483/images/ae8c4a8f75e3bc72d7182c5a9a929d9d0aa2a4238ec623bfea7b8c6ef63012e0.jpg b/data/2025/2504_10xxx/2504.10483/images/ae8c4a8f75e3bc72d7182c5a9a929d9d0aa2a4238ec623bfea7b8c6ef63012e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b5a4d4b7f9cdf2bb7460997a22d7f6b7c4caed57 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/ae8c4a8f75e3bc72d7182c5a9a929d9d0aa2a4238ec623bfea7b8c6ef63012e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21ce1a87ee92ce587c857be4239584b826db1374b564d503fd23e60aa7c6a9bf +size 2804 diff --git 
a/data/2025/2504_10xxx/2504.10483/images/aed0aebb7aa7f26a90775c3d741b04e5ee30a74fde9c4116c849efd5d8dfb03f.jpg b/data/2025/2504_10xxx/2504.10483/images/aed0aebb7aa7f26a90775c3d741b04e5ee30a74fde9c4116c849efd5d8dfb03f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b2fa0150d41a1d0986388eeedb653d969969b02 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/aed0aebb7aa7f26a90775c3d741b04e5ee30a74fde9c4116c849efd5d8dfb03f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8edbeef2dbcad5396215e51a4e0598a549a8cade55bf73aab34de0354cd3a9de +size 2986 diff --git a/data/2025/2504_10xxx/2504.10483/images/b04f9900754a787b98e10d234ab8b15d8eca65c470b50220f75f889f9a11f468.jpg b/data/2025/2504_10xxx/2504.10483/images/b04f9900754a787b98e10d234ab8b15d8eca65c470b50220f75f889f9a11f468.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca4302014476faaa63fa0cca303ae52d59955dee --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/b04f9900754a787b98e10d234ab8b15d8eca65c470b50220f75f889f9a11f468.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30e2ebfcf969358cc0347dfce6334454793c9f7e478abf9d7995bedd08017dd6 +size 7652 diff --git a/data/2025/2504_10xxx/2504.10483/images/b0f6d7040a425954eeab73ec3015199d815cfcfb9afc1f1b22d8e551e28f6b7f.jpg b/data/2025/2504_10xxx/2504.10483/images/b0f6d7040a425954eeab73ec3015199d815cfcfb9afc1f1b22d8e551e28f6b7f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6363abccf3e500b6f2bf889364fe1c8c26d66dde --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/b0f6d7040a425954eeab73ec3015199d815cfcfb9afc1f1b22d8e551e28f6b7f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7aeb34bbc307e345781c326e9f93b6ecbcdfd8f7be76fa8da9e42630b03b7e1f +size 166257 diff --git a/data/2025/2504_10xxx/2504.10483/images/bbdd03c0a00e3108ede5e16f8df6580adb828343ef7b9f14bbcf5aac90c1174e.jpg 
b/data/2025/2504_10xxx/2504.10483/images/bbdd03c0a00e3108ede5e16f8df6580adb828343ef7b9f14bbcf5aac90c1174e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..98597d68c59baeb85ebd2bb641b7a00e0d7230b5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/bbdd03c0a00e3108ede5e16f8df6580adb828343ef7b9f14bbcf5aac90c1174e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43fd44ba52dd734699122c0ccc6fb0120d40c15d0c4226b50ee816ea517a05bd +size 23010 diff --git a/data/2025/2504_10xxx/2504.10483/images/bd115de09eaaa1deabc44ecd1b4506a518d6f0770f80afac9bf401d68d93999c.jpg b/data/2025/2504_10xxx/2504.10483/images/bd115de09eaaa1deabc44ecd1b4506a518d6f0770f80afac9bf401d68d93999c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62d7e428c8ca53cb80eac036d7b3db3474b65760 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/bd115de09eaaa1deabc44ecd1b4506a518d6f0770f80afac9bf401d68d93999c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1aa5fc99fab202b95cb8499e7ff70e0ab53d06e6a4963a82c2239064a70e7000 +size 8535 diff --git a/data/2025/2504_10xxx/2504.10483/images/be5f24596e485bced0e6830cd32cfd126187b4d5cb069e03094ce1e4cef97c4d.jpg b/data/2025/2504_10xxx/2504.10483/images/be5f24596e485bced0e6830cd32cfd126187b4d5cb069e03094ce1e4cef97c4d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a728344326c78f47dab3849eb915aeff42677b0e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/be5f24596e485bced0e6830cd32cfd126187b4d5cb069e03094ce1e4cef97c4d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4aa7e493f4e835beb69c5c17538fee4cb61f6612768d74186dcdd055d3545b40 +size 9039 diff --git a/data/2025/2504_10xxx/2504.10483/images/c0ccebd2a37cbf0f831d5a90307f3271837c5bd97b301ad920db8dddccafe7f4.jpg b/data/2025/2504_10xxx/2504.10483/images/c0ccebd2a37cbf0f831d5a90307f3271837c5bd97b301ad920db8dddccafe7f4.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..12ee903dbf9d4731136c50df3dab884619fee2aa --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/c0ccebd2a37cbf0f831d5a90307f3271837c5bd97b301ad920db8dddccafe7f4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d35db132f1f965fd58d9c55c4340ad2d21cbb9567df5f5bbe6d00734a5a2c46 +size 7646 diff --git a/data/2025/2504_10xxx/2504.10483/images/d01256bacff19b6a3cc55c94487b9b303cf3b4501268a5a38f314e22bf356833.jpg b/data/2025/2504_10xxx/2504.10483/images/d01256bacff19b6a3cc55c94487b9b303cf3b4501268a5a38f314e22bf356833.jpg new file mode 100644 index 0000000000000000000000000000000000000000..21c0b68480d1925f50cfedac15e9b07eb6dc2ff6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/d01256bacff19b6a3cc55c94487b9b303cf3b4501268a5a38f314e22bf356833.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d22fdd479dcb0b00ce43abb7bf2b09e2f5193e6cb463f4f530dcbb8f8467a11e +size 6517 diff --git a/data/2025/2504_10xxx/2504.10483/images/d1e9c9c23709cb744c68d3c891fec414dcd095ab25592add4488c6349b77a306.jpg b/data/2025/2504_10xxx/2504.10483/images/d1e9c9c23709cb744c68d3c891fec414dcd095ab25592add4488c6349b77a306.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f6e24ce385ba78b13fdb98c17466f0906cf1eb2d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/d1e9c9c23709cb744c68d3c891fec414dcd095ab25592add4488c6349b77a306.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14afc5da6587a2d2bdeadcff780679b45b0dcdc7eef844f7c334dbb426eafe1a +size 3401 diff --git a/data/2025/2504_10xxx/2504.10483/images/e6f4ef56876b31e928da107740a6f9255035f413280dccaa59cbef9c0972b9c6.jpg b/data/2025/2504_10xxx/2504.10483/images/e6f4ef56876b31e928da107740a6f9255035f413280dccaa59cbef9c0972b9c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb6d563809fc2d7f80dac70ffc92d7e7c0b0e1e6 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10483/images/e6f4ef56876b31e928da107740a6f9255035f413280dccaa59cbef9c0972b9c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f730a5326a7811dce334cb9dcdc2ce74544e44cee220b22b5acdc8093612ccc8 +size 11098 diff --git a/data/2025/2504_10xxx/2504.10483/images/ebd09afb994a91ca2cde8433cbeccb25b078c9df362e91163c7c0a9f19d05664.jpg b/data/2025/2504_10xxx/2504.10483/images/ebd09afb994a91ca2cde8433cbeccb25b078c9df362e91163c7c0a9f19d05664.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b4cb611d94188652e66a563f4a72f85c365c41d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/ebd09afb994a91ca2cde8433cbeccb25b078c9df362e91163c7c0a9f19d05664.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ddddd444e16ccb5846d867a6616fc4eb85f9931948ce0406510be7990ffa3bb +size 50972 diff --git a/data/2025/2504_10xxx/2504.10483/images/ecca1ad26ed6740024c2b424367c42553d5a44b65aea95da9b3a313377175628.jpg b/data/2025/2504_10xxx/2504.10483/images/ecca1ad26ed6740024c2b424367c42553d5a44b65aea95da9b3a313377175628.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ac01ba6634573b6b03dfafb7a50f703f562fcfda --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/ecca1ad26ed6740024c2b424367c42553d5a44b65aea95da9b3a313377175628.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:152f9194ac11d574ff17979f8cf5b1497783fc51770efaeb2ecf7308518e1133 +size 1899 diff --git a/data/2025/2504_10xxx/2504.10483/images/f4c6791c5721f3d932ba8c2d44bb386e00abe01bc7069b0e7e53337554bc6967.jpg b/data/2025/2504_10xxx/2504.10483/images/f4c6791c5721f3d932ba8c2d44bb386e00abe01bc7069b0e7e53337554bc6967.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b1427148b720af78d513c581c74cf750e313614 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/f4c6791c5721f3d932ba8c2d44bb386e00abe01bc7069b0e7e53337554bc6967.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:d81fe36711190c424cd1bdc7f5674c7792f4f8a35c4ad455094bcc73948fb714 +size 37134 diff --git a/data/2025/2504_10xxx/2504.10483/images/fb38cb45bdc201bce864203f0564b39c6f26db98d3284c2cba327117327406b1.jpg b/data/2025/2504_10xxx/2504.10483/images/fb38cb45bdc201bce864203f0564b39c6f26db98d3284c2cba327117327406b1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1a047bd7b2809904861f8c8d3554e2b5546888e8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/fb38cb45bdc201bce864203f0564b39c6f26db98d3284c2cba327117327406b1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10e483d8635cd2a553ba9bcfd2706cc99751dbbbeb8ea03d911c96e8d40feba3 +size 19621 diff --git a/data/2025/2504_10xxx/2504.10483/images/fd12988f16d053f4c42208367c590281d1f1ccceeec126036badaef31a99e14a.jpg b/data/2025/2504_10xxx/2504.10483/images/fd12988f16d053f4c42208367c590281d1f1ccceeec126036badaef31a99e14a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..140182cde0573a5f5ce22470666829f5eb9511dc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/fd12988f16d053f4c42208367c590281d1f1ccceeec126036badaef31a99e14a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:050fc6780b79dca19cde999d4cc146eb81c6d75b42d1d875c33109fbcd516b3f +size 7737 diff --git a/data/2025/2504_10xxx/2504.10483/images/ffb775a5a42001303337822551611cf8813cb9ca3412dea2c0caff6206c92060.jpg b/data/2025/2504_10xxx/2504.10483/images/ffb775a5a42001303337822551611cf8813cb9ca3412dea2c0caff6206c92060.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c42e5fd7c2d97f46adf4f2293109235ffa9ed2e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/images/ffb775a5a42001303337822551611cf8813cb9ca3412dea2c0caff6206c92060.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ccab22bf66e1b06af887b046a9d9ac73721be6fbce5ff5578a350c847a74f9d +size 9352 diff --git 
a/data/2025/2504_10xxx/2504.10483/layout.json b/data/2025/2504_10xxx/2504.10483/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a7a52a9868cf879399e189184ad5c1bb22783f47 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10483/layout.json @@ -0,0 +1,11558 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 86, + 102, + 523, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 102, + 523, + 138 + ], + "spans": [ + { + "bbox": [ + 86, + 102, + 523, + 138 + ], + "type": "text", + "content": "REPA-E: Unlocking VAE for End-to-End Tuning with Latent Diffusion Transformers" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 118, + 160, + 491, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 160, + 491, + 190 + ], + "spans": [ + { + "bbox": [ + 118, + 160, + 491, + 190 + ], + "type": "text", + "content": "Xingjian Leng" + }, + { + "bbox": [ + 118, + 160, + 491, + 190 + ], + "type": "inline_equation", + "content": "^{\\alpha \\star}" + }, + { + "bbox": [ + 118, + 160, + 491, + 190 + ], + "type": "text", + "content": " Jaskirat Singh" + }, + { + "bbox": [ + 118, + 160, + 491, + 190 + ], + "type": "inline_equation", + "content": "^{\\alpha \\star}" + }, + { + "bbox": [ + 118, + 160, + 491, + 190 + ], + "type": "text", + "content": " Yunzhong Hou" + }, + { + "bbox": [ + 118, + 160, + 491, + 190 + ], + "type": "inline_equation", + "content": "^{\\alpha}" + }, + { + "bbox": [ + 118, + 160, + 491, + 190 + ], + "type": "text", + "content": " Zhenchang Xing" + }, + { + "bbox": [ + 118, + 160, + 491, + 190 + ], + "type": "inline_equation", + "content": "^{\\beta}" + }, + { + "bbox": [ + 118, + 160, + 491, + 190 + ], + "type": "text", + "content": " Saining Xie" + }, + { + "bbox": [ + 118, + 160, + 491, + 190 + ], + "type": "inline_equation", + "content": "^{\\chi}" + }, + { + "bbox": [ + 118, + 160, + 491, + 190 + ], + "type": "text", + "content": " Liang Zheng" + }, + { + "bbox": [ + 
118, + 160, + 491, + 190 + ], + "type": "inline_equation", + "content": "^{\\alpha}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 118, + 194, + 491, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 194, + 491, + 209 + ], + "spans": [ + { + "bbox": [ + 118, + 194, + 491, + 209 + ], + "type": "inline_equation", + "content": "^{\\alpha}" + }, + { + "bbox": [ + 118, + 194, + 491, + 209 + ], + "type": "text", + "content": "Australian National University " + }, + { + "bbox": [ + 118, + 194, + 491, + 209 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 118, + 194, + 491, + 209 + ], + "type": "text", + "content": "Data61 CSIRO " + }, + { + "bbox": [ + 118, + 194, + 491, + 209 + ], + "type": "inline_equation", + "content": "\\chi" + }, + { + "bbox": [ + 118, + 194, + 491, + 209 + ], + "type": "text", + "content": "New York University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 212, + 495, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 212, + 495, + 224 + ], + "spans": [ + { + "bbox": [ + 115, + 212, + 495, + 224 + ], + "type": "text", + "content": "{xingjian.length\\*, jaskirat.singh\\*, yunzhong.hou, liang.zheng}@anu.edu.au" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 165, + 226, + 443, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 226, + 443, + 237 + ], + "spans": [ + { + "bbox": [ + 165, + 226, + 443, + 237 + ], + "type": "text", + "content": "zhenchang.xing@data61.csiro.au saining.xie@nyu.edu" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 60, + 255, + 138, + 396 + ], + "blocks": [ + { + "bbox": [ + 60, + 255, + 138, + 396 + ], + "lines": [ + { + "bbox": [ + 60, + 255, + 138, + 396 + ], + "spans": [ + { + "bbox": [ + 60, + 255, + 138, + 396 + ], + "type": "image", + "image_path": "6e3d867ad461854a2b4897fe73b4d029c7a1b0fe01e5b82ec27c5b3c3fbac347.jpg" + } + ] + } + ], + "index": 6, + 
"angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 81, + 396, + 135, + 418 + ], + "lines": [ + { + "bbox": [ + 81, + 396, + 135, + 418 + ], + "spans": [ + { + "bbox": [ + 81, + 396, + 135, + 418 + ], + "type": "text", + "content": "a) Traditional LDM Training" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 54, + 422, + 555, + 489 + ], + "lines": [ + { + "bbox": [ + 54, + 422, + 555, + 489 + ], + "spans": [ + { + "bbox": [ + 54, + 422, + 555, + 489 + ], + "type": "text", + "content": "Figure 1. Can we unlock VAE for end-to-end tuning with latent-diffusion models? - Traditional deep learning wisdom dictates that end-to-end training is often preferable when possible. However, latent diffusion models usually only update the generator network while keeping the variational auto-encoder (VAE) fixed (a). This is because directly using the diffusion loss to update the VAE (b) causes the latent space to collapse. We show that while direct diffusion-loss is ineffective, end-to-end training can be unlocked through the representation-alignment (REPA) loss - allowing both encoder and diffusion model to be jointly tuned during the training process (c). Notably, this allows for significantly accelerated training; speeding up training by over " + }, + { + "bbox": [ + 54, + 422, + 555, + 489 + ], + "type": "inline_equation", + "content": "17\\times" + }, + { + "bbox": [ + 54, + 422, + 555, + 489 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 54, + 422, + 555, + 489 + ], + "type": "inline_equation", + "content": "45\\times" + }, + { + "bbox": [ + 54, + 422, + 555, + 489 + ], + "type": "text", + "content": " over REPA and vanilla training recipes, respectively (d)." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 161, + 255, + 375, + 396 + ], + "blocks": [ + { + "bbox": [ + 161, + 255, + 375, + 396 + ], + "lines": [ + { + "bbox": [ + 161, + 255, + 375, + 396 + ], + "spans": [ + { + "bbox": [ + 161, + 255, + 375, + 396 + ], + "type": "image", + "image_path": "a9541f9988db720abb731ca660a0d104cdb172ec8d219145d7c412f88112e0a4.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 174, + 396, + 249, + 418 + ], + "lines": [ + { + "bbox": [ + 174, + 396, + 249, + 418 + ], + "spans": [ + { + "bbox": [ + 174, + 396, + 249, + 418 + ], + "type": "text", + "content": "b) Naive End-to-End LDM Training" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 396, + 345, + 417 + ], + "lines": [ + { + "bbox": [ + 304, + 396, + 345, + 417 + ], + "spans": [ + { + "bbox": [ + 304, + 396, + 345, + 417 + ], + "type": "text", + "content": "c) REPA-E (Ours)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 394, + 258, + 552, + 394 + ], + "blocks": [ + { + "bbox": [ + 394, + 258, + 552, + 394 + ], + "lines": [ + { + "bbox": [ + 394, + 258, + 552, + 394 + ], + "spans": [ + { + "bbox": [ + 394, + 258, + 552, + 394 + ], + "type": "image", + "image_path": "3d0d5d48be88986c8ac48ee99088103e4823610d4c41d445054fcca80dcf74c9.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 410, + 396, + 536, + 417 + ], + "lines": [ + { + "bbox": [ + 410, + 396, + 536, + 417 + ], + "spans": [ + { + "bbox": [ + 410, + 396, + 536, + 417 + ], + "type": "text", + "content": "d) Training Steps vs. 
FID-50K Improved Generation Performance" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 151, + 498, + 200, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 498, + 200, + 510 + ], + "spans": [ + { + "bbox": [ + 151, + 498, + 200, + 510 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 524, + 297, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 524, + 297, + 704 + ], + "spans": [ + { + "bbox": [ + 53, + 524, + 297, + 704 + ], + "type": "text", + "content": "In this paper we tackle a fundamental question: \"Can we train latent diffusion models together with the variational auto-encoder (VAE) tokenizer in an end-to-end manner?\" Traditional deep-learning wisdom dictates that end-to-end training is often preferable when possible. However, for latent diffusion transformers, it is observed that end-to-end training both VAE and diffusion-model using standard diffusion-loss is ineffective, even causing a degradation in final performance. We show that while diffusion loss is ineffective, end-to-end training can be unlocked through the representation-alignment (REPA) loss - allowing both VAE and diffusion model to be jointly tuned during the training process. 
Despite its simplicity, the proposed training recipe (REPA-E) shows remarkable performance; speeding up diffusion model training by over " + }, + { + "bbox": [ + 53, + 524, + 297, + 704 + ], + "type": "inline_equation", + "content": "17 \\times" + }, + { + "bbox": [ + 53, + 524, + 297, + 704 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 53, + 524, + 297, + 704 + ], + "type": "inline_equation", + "content": "45 \\times" + }, + { + "bbox": [ + 53, + 524, + 297, + 704 + ], + "type": "text", + "content": " over REPA and" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 499, + 555, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 499, + 555, + 596 + ], + "spans": [ + { + "bbox": [ + 313, + 499, + 555, + 596 + ], + "type": "text", + "content": "vanilla training recipes, respectively. Interestingly, we observe that end-to-end tuning with REPA-E also improves the VAE itself; leading to improved latent space structure and downstream generation performance. In terms of final performance, our approach sets a new state-of-the-art; achieving FID of 1.12 and 1.69 with and without classifier-free guidance on ImageNet " + }, + { + "bbox": [ + 313, + 499, + 555, + 596 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 499, + 555, + 596 + ], + "type": "text", + "content": ". Code is available at https://end2end-diffusion.github.io." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 618, + 394, + 631 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 618, + 394, + 631 + ], + "spans": [ + { + "bbox": [ + 314, + 618, + 394, + 631 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 638, + 555, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 638, + 555, + 723 + ], + "spans": [ + { + "bbox": [ + 313, + 638, + 555, + 723 + ], + "type": "text", + "content": "End-to-end training has propelled the field forward for the past decade. It is understood that incorporating more components into end-to-end training can lead to increased performance, as evidenced by the evolution of the RCNN family [14, 15, 38]. With that said, training schemes of latent diffusion models (LDMs) [40] remain two-stage: first, the variational auto-encoder (VAE) [22] is trained with the re" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 219, + 35, + 568 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 219, + 35, + 568 + ], + "spans": [ + { + "bbox": [ + 14, + 219, + 35, + 568 + ], + "type": "text", + "content": "arXiv:2504.10483v3 [cs.CV] 22 Oct 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 712, + 142, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 712, + 142, + 722 + ], + "spans": [ + { + "bbox": [ + 70, + 712, + 142, + 722 + ], + "type": "text", + "content": "* Equal Contribution." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 62, + 282, + 220 + ], + "blocks": [ + { + "bbox": [ + 75, + 62, + 282, + 220 + ], + "lines": [ + { + "bbox": [ + 75, + 62, + 282, + 220 + ], + "spans": [ + { + "bbox": [ + 75, + 62, + 282, + 220 + ], + "type": "image", + "image_path": "58248ecea17233f3151ed2c3fa2b6660f014b9cc50ef2d343f2fafeecf08c27b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 97, + 223, + 259, + 233 + ], + "lines": [ + { + "bbox": [ + 97, + 223, + 259, + 233 + ], + "spans": [ + { + "bbox": [ + 97, + 223, + 259, + 233 + ], + "type": "text", + "content": "(a) PCA Analysis on VAE Latent Space Structure" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 334, + 63, + 533, + 220 + ], + "blocks": [ + { + "bbox": [ + 334, + 63, + 533, + 220 + ], + "lines": [ + { + "bbox": [ + 334, + 63, + 533, + 220 + ], + "spans": [ + { + "bbox": [ + 334, + 63, + 533, + 220 + ], + "type": "image", + "image_path": "6ff1982f4ad41e9b7eb323cd44e02190df89a9a8943279c88efeddf464d1cd62.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 338, + 223, + 529, + 233 + ], + "lines": [ + { + "bbox": [ + 338, + 223, + 529, + 233 + ], + "spans": [ + { + "bbox": [ + 338, + 223, + 529, + 233 + ], + "type": "text", + "content": "(b) Performance Improvements with REPA-E (400K Steps)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 54, + 239, + 555, + 316 + ], + "lines": [ + { + "bbox": 
[ + 54, + 239, + 555, + 316 + ], + "spans": [ + { + "bbox": [ + 54, + 239, + 555, + 316 + ], + "type": "text", + "content": "Figure 2. End-to-End Training Automatically Improves VAE Latent-Space Structure. (a) Following [24], we visualize latent space structure from different VAEs before and after end-to-end training using principal component analysis (PCA) that projects them to three channels colored by RGB. We consider SD-VAE [40], and IN-VAE" + }, + { + "bbox": [ + 54, + 239, + 555, + 316 + ], + "type": "inline_equation", + "content": "^1" + }, + { + "bbox": [ + 54, + 239, + 555, + 316 + ], + "type": "text", + "content": ", a " + }, + { + "bbox": [ + 54, + 239, + 555, + 316 + ], + "type": "inline_equation", + "content": "16 \\times" + }, + { + "bbox": [ + 54, + 239, + 555, + 316 + ], + "type": "text", + "content": " downsampling, 32-channel VAE trained on ImageNet [6]. For SD-VAE we find that latent representations have high-frequency noise. Applying end-to-end tuning helps learning a more smooth and less noisy latent representation. Interestingly to the contrast, the latent space for IN-VAE is over-smoothed (e.g., row-2). Applying end-to-end tuning automatically helps learn a more detailed latent space structure to best support final generation performance. (b) Jointly tuning both VAE and latent diffusion model (LDM) significantly improves final generation performance (gFID) across different VAE architectures." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 332, + 294, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 332, + 294, + 357 + ], + "spans": [ + { + "bbox": [ + 54, + 332, + 294, + 357 + ], + "type": "text", + "content": "construction loss; then, the diffusion model is trained with the diffusion loss while keeping the VAE fixed (see Fig. 1a)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 358, + 295, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 358, + 295, + 538 + ], + "spans": [ + { + "bbox": [ + 54, + 358, + 295, + 538 + ], + "type": "text", + "content": "The above two-stage division of the LDM training process, though popular, leads to a challenging optimization task: \"How to best optimize the representation from first stage (VAE) for optimal performance while training the second stage (diffusion model)?\" While recent works study the interplay between the performance of the two stages [24, 44], they are often limited to empirical analysis, which may vary depending on the architecture and training setting for both the VAE and the diffusion model. For instance, in a concurrent work [44] show that the latent space of popular autoencoders e.g., SD-VAE [40] suffer from high-frequency noise / components. However, as seen in Fig. 2 & 6, while the same holds for some VAEs (e.g. SD-VAE), it might not be true for other VAE architectures — which instead might suffer from an over-smoothed latent space (Fig. 2, 6)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 541, + 295, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 541, + 295, + 660 + ], + "spans": [ + { + "bbox": [ + 55, + 541, + 295, + 660 + ], + "type": "text", + "content": "In this paper, we therefore ask a fundamental question: \"Can we jointly tune both VAE and LDM in an end-to-end manner to best optimize final generation performance?\" Technically, it is straightforward to do end-to-end LDM training by simply back-propagating the diffusion loss to the VAE tokenizer. However, experiments (§3) reveal that this naive approach for end-to-end training is ineffective. The diffusion loss encourages learning a simpler latent space structure which is easier for denoising objective (refer §3.1), but leads to reduced generation performance (Fig. 1d)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 662, + 296, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 662, + 296, + 723 + ], + "spans": [ + { + "bbox": [ + 55, + 662, + 296, + 723 + ], + "type": "text", + "content": "To address this, we propose REPA-E; an end-to-end training recipe using representation alignment loss [54]. We show that while the diffusion loss is ineffective, end-to-end tuning can be unlocked through the recently proposed representation-alignment (REPA) loss - allowing both VAE" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 332, + 555, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 332, + 555, + 476 + ], + "spans": [ + { + "bbox": [ + 313, + 332, + 555, + 476 + ], + "type": "text", + "content": "and diffusion model to be jointly tuned during training process. Through extensive evaluations, we demonstrate that end-to-end tuning with REPA-E offers several advantages; End-to-End Training Leads to Accelerated Generation Performance; speeding up diffusion training by over " + }, + { + "bbox": [ + 313, + 332, + 555, + 476 + ], + "type": "inline_equation", + "content": "17 \\times" + }, + { + "bbox": [ + 313, + 332, + 555, + 476 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 332, + 555, + 476 + ], + "type": "inline_equation", + "content": "45 \\times" + }, + { + "bbox": [ + 313, + 332, + 555, + 476 + ], + "type": "text", + "content": " over REPA and vanilla training recipes (Fig. 1d). Furthermore, it also helps significantly improve the final generation performance. For instance as seen in Fig. 1d, we find that when using the popular SiT-XL [30] architecture, REPA-E reaches an FID of 4.07 within 400K steps, significantly boosting final performance over even REPA which only only reaches a final FID for 5.9 after 4M steps [54]." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 476, + 556, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 476, + 556, + 679 + ], + "spans": [ + { + "bbox": [ + 313, + 476, + 556, + 679 + ], + "type": "text", + "content": "End-to-End Training improves VAE latent-space structure. As seen in Fig. 2 and §4.4, we find that jointly tuning the VAE and latent diffusion model during training, automatically improves the latent space structure across different VAE architectures. For instance, for SD-VAE [40], it is observed that the original latent space suffers from high-frequency noise (Fig. 2). Applying end-to-end tuning helps learn a more smooth latent space representation. In contrast, the latent space for IN-VAE1 is over-smoothed. Applying REPA-E automatically helps learn more detailed latent space structure to best support generation performance. End-to-End Tuning Improves VAE Performance. Finally, we find that once tuned using REPA-E, the end-to-end tuned VAE can be used as a drop-in replacement for their original counterparts (e.g. SD-VAE) showing improved generation performance across diverse training settings and model architectures (refer §4.4)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 679, + 555, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 679, + 555, + 704 + ], + "spans": [ + { + "bbox": [ + 313, + 679, + 555, + 704 + ], + "type": "text", + "content": "To summarize, key contributions of this paper are: 1) We propose REPA-E; an end-to-end training recipe for jointly" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 324, + 712, + 549, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 712, + 549, + 722 + ], + "spans": [ + { + "bbox": [ + 324, + 712, + 549, + 722 + ], + "type": "inline_equation", + "content": "{}^{1}" + }, + { + "bbox": [ + 324, + 712, + 549, + 722 + ], + "type": "text", + "content": " trained onImagenet at " + }, + { + "bbox": [ + 324, + 712, + 549, + 722 + ], + "type": "inline_equation", + "content": "{f16d32}" + }, + { + "bbox": [ + 324, + 712, + 549, + 722 + ], + "type": "text", + "content": " using official training code from [40]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 204 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 204 + ], + "type": "text", + "content": "tuning both VAE and LDM using representation alignment loss (§3). 
2) We find that despite its simplicity, REPA-E leads to accelerated generation performance; speeding up diffusion training by over " + }, + { + "bbox": [ + 55, + 72, + 294, + 204 + ], + "type": "inline_equation", + "content": "17 \\times" + }, + { + "bbox": [ + 55, + 72, + 294, + 204 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 72, + 294, + 204 + ], + "type": "inline_equation", + "content": "45 \\times" + }, + { + "bbox": [ + 55, + 72, + 294, + 204 + ], + "type": "text", + "content": " over REPA and vanilla training recipes, respectively (§4.2). 3) We show that end-to-end training is able to adaptively improve the latent space structure across diverse VAE architectures. 4) We demonstrate that once tuned using REPA-E, the end-to-end tuned VAE can be used as a drop-in replacement for their original counterparts (e.g., SD-VAE), exhibiting significantly better downstream generation performance (§4.4)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 215, + 142, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 215, + 142, + 227 + ], + "spans": [ + { + "bbox": [ + 55, + 215, + 142, + 227 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 236, + 295, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 236, + 295, + 331 + ], + "spans": [ + { + "bbox": [ + 55, + 236, + 295, + 331 + ], + "type": "text", + "content": "Tokenizer or autoencoders (AE) [3] use either the variational objective [22] for continuous tokenization or a vector quantization objective [9, 48] for discrete tokenization [8-10, 16, 21, 22, 36, 40, 48, 53, 55]. However, current tokenizers are primarily trained for minimizing the reconstruction error, which maybe not provide the optimal latent space for generation [24]. We show that improved latent space structure is achieved by end-to-end training of LDMs." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 331, + 295, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 331, + 295, + 427 + ], + "spans": [ + { + "bbox": [ + 55, + 331, + 295, + 427 + ], + "type": "text", + "content": "Latent diffusion models leverage pre-trained image tokenizers to compress images into a lower-dimensional latent space to simplify the generative task [5, 10, 10, 11, 26, 32, 36, 40, 43, 47]. Despite their effectiveness, existing tokenizers and diffusion models are trained separately [10, 36, 40]. In this paper, we explore jointly optimizing tokenizers and diffusion models to achieve faster convergence and improved generation performance (Sec. 4)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 427, + 295, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 427, + 295, + 499 + ], + "spans": [ + { + "bbox": [ + 55, + 427, + 295, + 499 + ], + "type": "text", + "content": "Representation alignment for generative learning has recently shown huge promise for improving the training speed and performance of diffusion models [35, 50, 54]. We find that instead of applying the REPA loss separately over LDM [54] or VAE [50], significantly better performance and training speed can be achieved through E2E training." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 499, + 295, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 499, + 295, + 583 + ], + "spans": [ + { + "bbox": [ + 55, + 499, + 295, + 583 + ], + "type": "text", + "content": "End-to-End Diffusion. LSGM [47] explores joint training with score-based generative models, which uses a variational lower bound objective with an entropy term for preventing latent space collapse while backpropagating the diffusion loss. We empirically find that while this helps prevent latent space collapse, REPA-E shows significantly faster convergence during E2E training (refer App. B)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 594, + 294, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 294, + 608 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 294, + 608 + ], + "type": "text", + "content": "3. REPA-E: Unlocking VAE for Joint Training" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 614, + 295, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 614, + 295, + 723 + ], + "spans": [ + { + "bbox": [ + 55, + 614, + 295, + 723 + ], + "type": "text", + "content": "Overview. Given a variational autoencoder (VAE) and latent diffusion transformer (e.g., SiT [30]), we wish to jointly tune the VAE latent representation and diffusion model features in an end-to-end manner to best optimize the final generation performance. To this end, we first make three key insights in §3.1: 1) Naive end-to-end tuning - directly back-propagating the diffusion loss to the VAE is ineffective. The diffusion loss encourages learning a more simpler latent space structure (Fig. 3a) which is easier for min" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 553, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 216 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 216 + ], + "type": "text", + "content": "imizing the denoising objective [40], but degrades the final generation performance. We next analyze the recently proposed representation-alignment loss [54] showing that; 2) Higher representation-alignment score [54] correlates with improved generation performance (Fig. 3b). This offers an alternate path for improving final generation performance using representation-alignment score as a proxy. 3) The maximum achievable alignment score with vanilla-REPA is bottlenecked by the VAE latent space features. 
We further show that backpropagating the REPA loss to the VAE during training can help address this limitation, significantly improving final representation-alignment score (Fig. 3c)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 217, + 553, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 217, + 553, + 348 + ], + "spans": [ + { + "bbox": [ + 313, + 217, + 553, + 348 + ], + "type": "text", + "content": "Given the above insights, we finally propose REPA-E (§3.2); an end-to-end tuning recipe for both VAE and LDM features. Our key idea is simple: instead of directly using diffusion loss for end-to-end tuning, we can use the representation alignment score as a proxy for the final generation performance. This motivates our final approach, where instead of the diffusion loss, we propose to perform end-to-end training using the representation-alignment loss. The end-to-end training with REPA loss helps better improve the final representation-alignment score (Fig. 3b), which in turn leads to improved final generation performance (§3.1)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 357, + 545, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 357, + 545, + 369 + ], + "spans": [ + { + "bbox": [ + 313, + 357, + 545, + 369 + ], + "type": "text", + "content": "3.1. Motivating End-to-End Training with REPA" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "spans": [ + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "text", + "content": "Naive End-to-End Tuning is Ineffective. We first analyze the naive approach for end-to-end tuning; directly backpropagating the diffusion loss to the VAE tokenizer. As shown in Fig. 
3a, we observe that directly backpropagating the diffusion loss encourages learning a more simpler latent space structure with lower variance along the spatial dimensions (Tab. 10). The simpler latent-space structure poses an easier problem for the denoising objective [40], but leads to reduced generation performance (Fig. 1). Consider an intermediate latent " + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "inline_equation", + "content": "z_{t} = \\alpha_{t}z_{\\mathrm{VAE}} + \\sigma_{t}\\epsilon_{orig}" + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "text", + "content": " for any timestep " + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "text", + "content": ". The denoising objective [34] mainly aims to predict " + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "inline_equation", + "content": "\\epsilon_{pred}" + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "text", + "content": "; estimating the originally added noise " + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "inline_equation", + "content": "\\epsilon_{orig}" + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "text", + "content": " from VAE features " + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "inline_equation", + "content": "z_{\\mathrm{VAE}}" + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "text", + "content": " and timestep " + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "text", + "content": ". 
As the variance along the spatial dimensions for VAE latent " + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "inline_equation", + "content": "z_{\\mathrm{VAE}}" + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "text", + "content": " goes down, the denoising objective effectively reduces to predicting a bias term for recovering back the originally added noise " + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "inline_equation", + "content": "\\epsilon_{orig}" + }, + { + "bbox": [ + 313, + 375, + 553, + 723 + ], + "type": "text", + "content": ". Thus, backpropagation the diffusion loss effectively hacks the latent space structure to create an easier denoising problem, but leads to a reduced generation performance (Fig. 1). Higher Representation Alignment Correlates with Better Generation Performance. Similar to the findings of [54], we also measure representation alignment using CKNNA scores [19] across different model sizes and training iterations. As seen in Fig. 3b, we observe that higher representation alignment during the training process leads to improved generation performance. This suggests an alternate path for improving generation performance by using the representation alignment objective instead of the diffusion loss for end-to-end training (refer §3.2)." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 76, + 96, + 115 + ], + "blocks": [ + { + "bbox": [ + 66, + 59, + 85, + 68 + ], + "lines": [ + { + "bbox": [ + 66, + 59, + 85, + 68 + ], + "spans": [ + { + "bbox": [ + 66, + 59, + 85, + 68 + ], + "type": "text", + "content": "RGB" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 68, + 86, + 76 + ], + "lines": [ + { + "bbox": [ + 67, + 68, + 86, + 76 + ], + "spans": [ + { + "bbox": [ + 67, + 68, + 86, + 76 + ], + "type": "text", + "content": "Image" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 57, + 76, + 96, + 115 + ], + "lines": [ + { + "bbox": [ + 57, + 76, + 96, + 115 + ], + "spans": [ + { + "bbox": [ + 57, + 76, + 96, + 115 + ], + "type": "image", + "image_path": "aed0aebb7aa7f26a90775c3d741b04e5ee30a74fde9c4116c849efd5d8dfb03f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 97, + 76, + 136, + 115 + ], + "blocks": [ + { + "bbox": [ + 102, + 60, + 131, + 68 + ], + "lines": [ + { + "bbox": [ + 102, + 60, + 131, + 68 + ], + "spans": [ + { + "bbox": [ + 102, + 60, + 131, + 68 + ], + "type": "text", + "content": "SDVAE" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 102, + 68, + 131, + 76 + ], + "lines": [ + { + "bbox": [ + 102, + 68, + 131, + 76 + ], + "spans": [ + { + "bbox": [ + 102, + 68, + 131, + 76 + ], + "type": "text", + "content": "w/o E2E" + } + ] + } + ], + 
"index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 97, + 76, + 136, + 115 + ], + "lines": [ + { + "bbox": [ + 97, + 76, + 136, + 115 + ], + "spans": [ + { + "bbox": [ + 97, + 76, + 136, + 115 + ], + "type": "image", + "image_path": "678247a76edebf75eef996545bf9f71dcd7acf3febf0a69f2a6874afe513f021.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 137, + 76, + 177, + 115 + ], + "blocks": [ + { + "bbox": [ + 141, + 60, + 172, + 68 + ], + "lines": [ + { + "bbox": [ + 141, + 60, + 172, + 68 + ], + "spans": [ + { + "bbox": [ + 141, + 60, + 172, + 68 + ], + "type": "text", + "content": "E2E with" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 138, + 68, + 175, + 76 + ], + "lines": [ + { + "bbox": [ + 138, + 68, + 175, + 76 + ], + "spans": [ + { + "bbox": [ + 138, + 68, + 175, + 76 + ], + "type": "text", + "content": "REPA Loss" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 137, + 76, + 177, + 115 + ], + "lines": [ + { + "bbox": [ + 137, + 76, + 177, + 115 + ], + "spans": [ + { + "bbox": [ + 137, + 76, + 177, + 115 + ], + "type": "image", + "image_path": "737ee93bcfc4726d031fc27ae03451e0ae5dc24d68c85911901ac5255cefdf3a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 178, + 76, + 217, + 115 + ], + "blocks": [ + { + "bbox": [ + 181, + 60, + 212, + 68 + ], + "lines": [ + { + "bbox": [ + 181, + 60, + 212, + 68 + ], + "spans": [ + { + "bbox": [ + 181, + 60, + 212, + 68 + ], + "type": "text", + "content": "E2E with" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 181, + 68, + 212, + 76 + ], + "lines": [ + { + "bbox": [ + 181, + 68, + 212, + 76 + ], + "spans": [ + { + "bbox": [ + 181, + 68, + 212, + 76 + ], + "type": "text", + "content": 
"Diff, Loss" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 178, + 76, + 217, + 115 + ], + "lines": [ + { + "bbox": [ + 178, + 76, + 217, + 115 + ], + "spans": [ + { + "bbox": [ + 178, + 76, + 217, + 115 + ], + "type": "image", + "image_path": "ecca1ad26ed6740024c2b424367c42553d5a44b65aea95da9b3a313377175628.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 58, + 115, + 96, + 155 + ], + "blocks": [ + { + "bbox": [ + 58, + 115, + 96, + 155 + ], + "lines": [ + { + "bbox": [ + 58, + 115, + 96, + 155 + ], + "spans": [ + { + "bbox": [ + 58, + 115, + 96, + 155 + ], + "type": "image", + "image_path": "3a884ef9a7201faa6ce809763a9801e6306cc17f1db558b545d4255c6db7f8fb.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 97, + 115, + 136, + 155 + ], + "blocks": [ + { + "bbox": [ + 97, + 115, + 136, + 155 + ], + "lines": [ + { + "bbox": [ + 97, + 115, + 136, + 155 + ], + "spans": [ + { + "bbox": [ + 97, + 115, + 136, + 155 + ], + "type": "image", + "image_path": "3b2ef12788c4a2120146d9f5d497dc212500338f2b6db3d99890fedb7710f1b6.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 137, + 115, + 177, + 155 + ], + "blocks": [ + { + "bbox": [ + 137, + 115, + 177, + 155 + ], + "lines": [ + { + "bbox": [ + 137, + 115, + 177, + 155 + ], + "spans": [ + { + "bbox": [ + 137, + 115, + 177, + 155 + ], + "type": "image", + "image_path": "6d94cb8882bfecafaab1bbb1acfc06fe066cc5249188812276c3541185a1e8ca.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 178, + 115, + 217, + 155 + ], + "blocks": [ + { + "bbox": [ + 178, + 115, + 217, + 155 + ], + "lines": [ + { + "bbox": [ + 178, + 115, + 217, + 155 + ], 
+ "spans": [ + { + "bbox": [ + 178, + 115, + 217, + 155 + ], + "type": "image", + "image_path": "2062b30ebd77b104482e0ace1af3c36cfdc092c6d22e5d9f827fdc42a5cdf62f.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 58, + 155, + 96, + 194 + ], + "blocks": [ + { + "bbox": [ + 58, + 155, + 96, + 194 + ], + "lines": [ + { + "bbox": [ + 58, + 155, + 96, + 194 + ], + "spans": [ + { + "bbox": [ + 58, + 155, + 96, + 194 + ], + "type": "image", + "image_path": "026a2509afc429c08ebf20dbfc03dde3f7f9882b223f893a1155f70e028d28db.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 73, + 196, + 200, + 206 + ], + "lines": [ + { + "bbox": [ + 73, + 196, + 200, + 206 + ], + "spans": [ + { + "bbox": [ + 73, + 196, + 200, + 206 + ], + "type": "text", + "content": "(a) PCA Visualization of Latent Spaces" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 97, + 155, + 136, + 194 + ], + "blocks": [ + { + "bbox": [ + 97, + 155, + 136, + 194 + ], + "lines": [ + { + "bbox": [ + 97, + 155, + 136, + 194 + ], + "spans": [ + { + "bbox": [ + 97, + 155, + 136, + 194 + ], + "type": "image", + "image_path": "d1e9c9c23709cb744c68d3c891fec414dcd095ab25592add4488c6349b77a306.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 137, + 155, + 177, + 194 + ], + "blocks": [ + { + "bbox": [ + 137, + 155, + 177, + 194 + ], + "lines": [ + { + "bbox": [ + 137, + 155, + 177, + 194 + ], + "spans": [ + { + "bbox": [ + 137, + 155, + 177, + 194 + ], + "type": "image", + "image_path": "ae8c4a8f75e3bc72d7182c5a9a929d9d0aa2a4238ec623bfea7b8c6ef63012e0.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 178, + 155, + 217, + 194 + ], + 
"blocks": [ + { + "bbox": [ + 178, + 155, + 217, + 194 + ], + "lines": [ + { + "bbox": [ + 178, + 155, + 217, + 194 + ], + "spans": [ + { + "bbox": [ + 178, + 155, + 217, + 194 + ], + "type": "image", + "image_path": "96122cd52837ce36699d9fc4d592f85bbb3c75af36fa0f639b4dd5f6896b3775.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 226, + 65, + 353, + 194 + ], + "blocks": [ + { + "bbox": [ + 226, + 65, + 353, + 194 + ], + "lines": [ + { + "bbox": [ + 226, + 65, + 353, + 194 + ], + "spans": [ + { + "bbox": [ + 226, + 65, + 353, + 194 + ], + "type": "image", + "image_path": "4084e4f8a694c979c3b8b8e5e18a0d0a6961457d400ae16e873a14a36c04d873.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 226, + 196, + 354, + 206 + ], + "lines": [ + { + "bbox": [ + 226, + 196, + 354, + 206 + ], + "spans": [ + { + "bbox": [ + 226, + 196, + 354, + 206 + ], + "type": "text", + "content": "(b) Correlation: gFID & CKNNA Score" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 356, + 75, + 548, + 196 + ], + "blocks": [ + { + "bbox": [ + 356, + 75, + 548, + 196 + ], + "lines": [ + { + "bbox": [ + 356, + 75, + 548, + 196 + ], + "spans": [ + { + "bbox": [ + 356, + 75, + 548, + 196 + ], + "type": "image", + "image_path": "3af1810a7f98b4e4da924a364be17b210eb220e9b0b6c4f9bb76ce63a6aa8af7.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 375, + 196, + 542, + 206 + ], + "lines": [ + { + "bbox": [ + 375, + 196, + 542, + 206 + ], + "spans": [ + { + "bbox": [ + 375, + 196, + 542, + 206 + ], + "type": "text", + "content": "(c) E2E tuning with REPA improves CKNNA Score" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 212, + 555, + 322 + ], + "lines": [ + { + "bbox": [ + 55, + 212, + 555, 
+ 322 + ], + "spans": [ + { + "bbox": [ + 55, + 212, + 555, + 322 + ], + "type": "text", + "content": "Figure 3. Motivating End-to-End Tuning using Representation Alignment (REPA) Loss. We make three key insights: 1) Naive end-to-end (E2E) tuning using diffusion loss is ineffective. The diffusion encourages learning a more simpler latent space structure (a) which is easier for denoising objective (refer §3.1) but degrades final generation performance (Fig. 1). We next analyze the recently proposed representation alignment (REPA) loss [54] showing: 2) Higher representation alignment (CKNNA) leads to better generation performance. This suggests an alternate path for improving performance by using representation-alignment (CKNNA) as proxy for generation performance. 3) The maximum achievable CKNNA score with vanilla-REPA is bottlenecked by the VAE features (c) saturating around " + }, + { + "bbox": [ + 55, + 212, + 555, + 322 + ], + "type": "inline_equation", + "content": "\\sim 0.42" + }, + { + "bbox": [ + 55, + 212, + 555, + 322 + ], + "type": "text", + "content": ". Back-propagating the REPA-loss to the VAE helps address this limitation and improve the final CKNNA score. Given the above insights: we propose REPA-E (" + }, + { + "bbox": [ + 55, + 212, + 555, + 322 + ], + "type": "inline_equation", + "content": "\\S 3.2" + }, + { + "bbox": [ + 55, + 212, + 555, + 322 + ], + "type": "text", + "content": ") for end-to-end LDM training. The key idea is simple: instead of using the diffusion loss, we perform end-to-end training using the REPA loss. The end-to-end training with REPA loss helps improve the final representation-alignment (CKNNA), which in turn leads to improved generation performance (" + }, + { + "bbox": [ + 55, + 212, + 555, + 322 + ], + "type": "inline_equation", + "content": "\\S 4" + }, + { + "bbox": [ + 55, + 212, + 555, + 322 + ], + "type": "text", + "content": ")." 
+ } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "bbox": [ + 55, + 332, + 295, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 332, + 295, + 452 + ], + "spans": [ + { + "bbox": [ + 55, + 332, + 295, + 452 + ], + "type": "text", + "content": "Representation Alignment is Bottlenecked by the VAE Features. Fig. 3c shows that while the naive application of REPA loss [54] leads to improved representation-alignment (CKNNA) score, the maximum achievable alignment score is still bottlenecked the VAE features saturating around a value of 0.4 (maximum value of 1). Furthermore, we find that backpropagating the representation-alignment loss to the VAE helps address this limitation; allowing end-to-end optimization of the VAE features to best support representation-alignment objective [54]." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 55, + 463, + 231, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 463, + 231, + 475 + ], + "spans": [ + { + "bbox": [ + 55, + 463, + 231, + 475 + ], + "type": "text", + "content": "3.2. End-to-End Training with REPA" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 55, + 482, + 295, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 482, + 295, + 589 + ], + "spans": [ + { + "bbox": [ + 55, + 482, + 295, + 589 + ], + "type": "text", + "content": "Given the above insights, we next propose REPA-E (§3.2); an end-to-end tuning recipe for jointly training both VAE and LDM features. Instead of directly using diffusion loss, we propose to perform end-to-end training using the representation-alignment loss. The end-to-end training with REPA loss helps better improve the final representation-alignment score (Fig. 3c), which in turn leads to improved final generation performance (refer §4.2). We next discuss key details for implementation of REPA-E for training." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 55, + 591, + 295, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 591, + 295, + 723 + ], + "spans": [ + { + "bbox": [ + 55, + 591, + 295, + 723 + ], + "type": "text", + "content": "Batch-Norm Layer for VAE Latent Normalization. To enable end-to-end training, we first introduce a batchnorm layer between the VAE and latent diffusion model (Fig. 1). Typical LDM training involves normalizing the VAE features using precomputed latent statistics (e.g., std " + }, + { + "bbox": [ + 55, + 591, + 295, + 723 + ], + "type": "inline_equation", + "content": "= 1 / 0.1825" + }, + { + "bbox": [ + 55, + 591, + 295, + 723 + ], + "type": "text", + "content": " for SD-VAE [40]). This helps normalize the VAE latent outputs to zero mean and unit variance for more efficient training for the diffusion model. However, with end-to-end training the statistics need to be recomputed whenever the VAE model is updated - which is expensive. To address this, we propose the use of a batch" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 313, + 332, + 553, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 332, + 553, + 392 + ], + "spans": [ + { + "bbox": [ + 313, + 332, + 553, + 392 + ], + "type": "text", + "content": "norm layer [20] which uses the exponential moving average (EMA) mean and variance as a surrogate for dataset-level statistics. The batch-norm layer thus acts as a differentiable normalization operator without the need for recomputing dataset level statistics after each optimization step." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "spans": [ + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "text", + "content": "End-to-End Representation-Alignment Loss. 
We next enable end-to-end training, by using the REPA loss [54] for updating the parameters for both VAE and LDM during training. Formally, let " + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "inline_equation", + "content": "\\mathcal{V}_{\\phi}" + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "text", + "content": " represent the VAE, " + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\theta}" + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "text", + "content": " be the diffusion model, " + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "text", + "content": " be the fixed pretrained perceptual model (e.g., DINO-v2 [33]) for REPA [54] and " + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "text", + "content": " be a clean image. Also similar to REPA, consider " + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "inline_equation", + "content": "h_{\\omega}(\\mathbf{h}_t)" + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "text", + "content": " be the projection of diffusion transformer output " + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_t" + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "text", + "content": " through a trainable projection layer " + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "inline_equation", + "content": "h_{\\omega}" + }, + { + "bbox": [ + 313, + 392, + 554, + 513 + ], + "type": "text", + "content": ". 
We then perform end-to-end training by applying the REPA loss over both LDM and VAE as," + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 315, + 517, + 552, + 550 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 517, + 552, + 550 + ], + "spans": [ + { + "bbox": [ + 315, + 517, + 552, + 550 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {R E P A}} (\\theta , \\phi , \\omega) = - \\mathbb {E} _ {\\mathbf {x}, \\epsilon , t} \\left[ \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\operatorname {s i m} \\left(\\mathbf {y} ^ {[ n ]}, h _ {\\omega} \\left(\\mathbf {h} _ {t} ^ {[ n ]}\\right)\\right) \\right],", + "image_path": "bd115de09eaaa1deabc44ecd1b4506a518d6f0770f80afac9bf401d68d93999c.jpg" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 313, + 555, + 553, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 555, + 553, + 614 + ], + "spans": [ + { + "bbox": [ + 313, + 555, + 553, + 614 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 555, + 553, + 614 + ], + "type": "inline_equation", + "content": "\\mathbf{y} = f(\\mathbf{x})" + }, + { + "bbox": [ + 313, + 555, + 553, + 614 + ], + "type": "text", + "content": " is the output of the pretrained perceptual model (e.g., DINO-v2 [33]), " + }, + { + "bbox": [ + 313, + 555, + 553, + 614 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 555, + 553, + 614 + ], + "type": "text", + "content": " is number of patches, " + }, + { + "bbox": [ + 313, + 555, + 553, + 614 + ], + "type": "inline_equation", + "content": "\\mathrm{sim}(< ., . 
>)" + }, + { + "bbox": [ + 313, + 555, + 553, + 614 + ], + "type": "text", + "content": " computes the patch-wise cosine similarities between pretrained representation " + }, + { + "bbox": [ + 313, + 555, + 553, + 614 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 313, + 555, + 553, + 614 + ], + "type": "text", + "content": " from perceptual model (e.g., DINO-v2) and diffusion transformer hidden state " + }, + { + "bbox": [ + 313, + 555, + 553, + 614 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_t" + }, + { + "bbox": [ + 313, + 555, + 553, + 614 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 313, + 615, + 554, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 615, + 554, + 686 + ], + "spans": [ + { + "bbox": [ + 313, + 615, + 554, + 686 + ], + "type": "text", + "content": "Diffusion Loss with Stop-Gradient. As discussed in Fig. 3a and §3.1, backpropagating the diffusion loss to the VAE causes a degradation of latent-space structure. To avoid this, we introduce a simple stopgrad operation which limits the application of diffusion loss " + }, + { + "bbox": [ + 313, + 615, + 554, + 686 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{DIFF}}" + }, + { + "bbox": [ + 313, + 615, + 554, + 686 + ], + "type": "text", + "content": " to only the parameters " + }, + { + "bbox": [ + 313, + 615, + 554, + 686 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 313, + 615, + 554, + 686 + ], + "type": "text", + "content": " of the latent diffusion model " + }, + { + "bbox": [ + 313, + 615, + 554, + 686 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\theta}" + }, + { + "bbox": [ + 313, + 615, + 554, + 686 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 313, + 687, + 553, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 687, + 553, + 723 + ], + "spans": [ + { + "bbox": [ + 313, + 687, + 553, + 723 + ], + "type": "text", + "content": "VAE Regularization Losses. Finally, we introduce regularization losses " + }, + { + "bbox": [ + 313, + 687, + 553, + 723 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{REG}}" + }, + { + "bbox": [ + 313, + 687, + 553, + 723 + ], + "type": "text", + "content": " for VAE " + }, + { + "bbox": [ + 313, + 687, + 553, + 723 + ], + "type": "inline_equation", + "content": "\\nu_{\\phi}" + }, + { + "bbox": [ + 313, + 687, + 553, + 723 + ], + "type": "text", + "content": ", to ensure that the end-to-end training process does not impact the reconstruction" + } + ] + } + ], + "index": 35 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 61, + 555, + 300 + ], + "blocks": [ + { + "bbox": [ + 60, + 61, + 555, + 300 + ], + "lines": [ + { + "bbox": [ + 60, + 61, + 555, + 300 + ], + "spans": [ + { + "bbox": [ + 60, + 61, + 555, + 300 + ], + "type": "image", + "image_path": "b0f6d7040a425954eeab73ec3015199d815cfcfb9afc1f1b22d8e551e28f6b7f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 304, + 555, + 339 + ], + "lines": [ + { + "bbox": [ + 55, + 304, + 555, + 339 + ], + "spans": [ + { + "bbox": [ + 55, + 304, + 555, + 339 + ], + "type": "text", + "content": "Figure 4. End-to-End Tuning (REPA-E) Improves Visual Scaling. 
We observe that REPA-E produces higher-quality images at " + }, + { + "bbox": [ + 55, + 304, + 555, + 339 + ], + "type": "inline_equation", + "content": "400\\mathrm{K}" + }, + { + "bbox": [ + 55, + 304, + 555, + 339 + ], + "type": "text", + "content": " steps compared with the vanilla-REPA and generates more structurally meaningful images even in the early stages of training. Results for both methods are sampled using the same seed, noise and class label. We use a classifier-free guidance scale of 4.0 during sampling." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 349, + 295, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 349, + 295, + 396 + ], + "spans": [ + { + "bbox": [ + 55, + 349, + 295, + 396 + ], + "type": "text", + "content": "performance (rFID) of the original VAE. In particular, following [1], we use three losses, 1) Reconstruction Losses " + }, + { + "bbox": [ + 55, + 349, + 295, + 396 + ], + "type": "inline_equation", + "content": "(\\mathcal{L}_{\\mathrm{MSE}},\\mathcal{L}_{\\mathrm{LPIPS}})" + }, + { + "bbox": [ + 55, + 349, + 295, + 396 + ], + "type": "text", + "content": ", 2) GAN Loss " + }, + { + "bbox": [ + 55, + 349, + 295, + 396 + ], + "type": "inline_equation", + "content": "(\\mathcal{L}_{\\mathrm{GAN}})" + }, + { + "bbox": [ + 55, + 349, + 295, + 396 + ], + "type": "text", + "content": ", 3) KL divergence loss " + }, + { + "bbox": [ + 55, + 349, + 295, + 396 + ], + "type": "inline_equation", + "content": "(\\mathcal{L}_{\\mathrm{KL}})" + }, + { + "bbox": [ + 55, + 349, + 295, + 396 + ], + "type": "text", + "content": " as regularization loss " + }, + { + "bbox": [ + 55, + 349, + 295, + 396 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{REG}}" + }, + { + "bbox": [ + 55, + 349, + 295, + 396 + ], + "type": "text", + "content": " for the VAE " + }, + { + "bbox": [ + 55, + 349, + 295, + 396 + ], + "type": 
"inline_equation", + "content": "\\nu_{\\phi}" + }, + { + "bbox": [ + 55, + 349, + 295, + 396 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 397, + 295, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 397, + 295, + 422 + ], + "spans": [ + { + "bbox": [ + 55, + 397, + 295, + 422 + ], + "type": "text", + "content": "Overall Training. The overall training is then performed in an end-to-end manner using the following loss," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 57, + 430, + 293, + 444 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 430, + 293, + 444 + ], + "spans": [ + { + "bbox": [ + 57, + 430, + 293, + 444 + ], + "type": "interline_equation", + "content": "\\mathcal {L} (\\theta , \\phi , \\omega) = \\mathcal {L} _ {\\mathrm {D I F F}} (\\theta) + \\lambda \\mathcal {L} _ {\\mathrm {R E P A}} (\\theta , \\phi , \\omega) + \\eta \\mathcal {L} _ {\\mathrm {R E G}} (\\phi),", + "image_path": "32779fa7abeff32d8b30751ed86e723fd51a7418a4339d051f5089a2ad573a77.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 452, + 296, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 452, + 296, + 489 + ], + "spans": [ + { + "bbox": [ + 55, + 452, + 296, + 489 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 452, + 296, + 489 + ], + "type": "inline_equation", + "content": "\\theta, \\phi, \\omega" + }, + { + "bbox": [ + 55, + 452, + 296, + 489 + ], + "type": "text", + "content": " refer to the parameters for the LDM, VAE and trainable REPA projection layer [54], respectively. Further implementation details are provided in §4.1 and Appendix." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 498, + 137, + 512 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 498, + 137, + 512 + ], + "spans": [ + { + "bbox": [ + 55, + 498, + 137, + 512 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 518, + 295, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 518, + 295, + 554 + ], + "spans": [ + { + "bbox": [ + 55, + 518, + 295, + 554 + ], + "type": "text", + "content": "We next validate the performance of REPA-E and the effect of proposed components through extensive evaluation. In particular, we investigate three key research questions:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 554, + 296, + 651 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 56, + 554, + 295, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 554, + 295, + 578 + ], + "spans": [ + { + "bbox": [ + 56, + 554, + 295, + 578 + ], + "type": "text", + "content": "1. Can REPA-E significantly improve generation performance and training speed? (Sec. 4.2, Tab. 1, Fig. 1, 4)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 578, + 295, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 578, + 295, + 613 + ], + "spans": [ + { + "bbox": [ + 55, + 578, + 295, + 613 + ], + "type": "text", + "content": "2. Does REPA-E generalize across variations in training settings including model-scale, architecture, encoder model for REPA etc.? (Sec. 4.3, Tab. 2, 3, 4, 5, 6, 7)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 614, + 296, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 614, + 296, + 651 + ], + "spans": [ + { + "bbox": [ + 55, + 614, + 296, + 651 + ], + "type": "text", + "content": "3. 
Analyze the impact of end-to-end tuning (REPA-E) on VAE latent-space structure and downstream generation performance. (please refer Sec. 4.4, Fig. 6, Tab. 8, 9)" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 657, + 105, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 657, + 105, + 670 + ], + "spans": [ + { + "bbox": [ + 55, + 657, + 105, + 670 + ], + "type": "text", + "content": "4.1. Setup" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 674, + 296, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 674, + 296, + 723 + ], + "spans": [ + { + "bbox": [ + 55, + 674, + 296, + 723 + ], + "type": "text", + "content": "Implementation Details. We follow the same setup as in SiT [30] and REPA [54] unless otherwise specified. All training is conducted on the ImageNet [6] training split. We adopt the same data preprocessing protocol as" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "spans": [ + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "text", + "content": "in ADM [7], where original images are center-cropped and resized to " + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "text", + "content": " resolution. We experiment with publicly available VAEs, including SD-VAE (f8d4) [40], VA-VAE (f16d32) [40], and our own f16d32 VAE trained on ImageNet, referred to as IN-VAE. 
Depending on the VAE downsampling rate, we adopt SiT-XL/1 and SiT-XL/2 for " + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "inline_equation", + "content": "16 \\times" + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "text", + "content": " downsampling rates, respectively, where 1 and 2 denote the patch sizes in the transformer embedding layer. We disable affine transformations in the BN [20] layer between the VAE and SiT, relying solely on the running mean and standard deviation. The VAE regularization loss combines multiple objectives and is defined as: " + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{REG}} = \\mathcal{L}_{\\mathrm{KL}} + \\mathcal{L}_{\\mathrm{MSE}} + \\mathcal{L}_{\\mathrm{LPIPS}} + \\mathcal{L}_{\\mathrm{GAN}}" + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "text", + "content": ". For alignment loss, we use DINOv2 [33] as external visual features and apply alignment to the eighth layer of the SiT model. Empirically, we set the alignment loss coefficient to " + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{REPA}_g} = 0.5" + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "text", + "content": " for updating SiT and " + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{REPA}_v} = 1.5" + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "text", + "content": " for VAE. 
For optimization, we use AdamW [23, 29] with a constant learning rate of " + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 313, + 349, + 555, + 612 + ], + "type": "text", + "content": ", and a global batch size of 256. During training, we apply gradient clipping and exponential moving average (EMA) to the generative model for stable optimization. All experiments are conducted on 8 NVIDIA H100 GPUs." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 614, + 556, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 614, + 556, + 723 + ], + "spans": [ + { + "bbox": [ + 313, + 614, + 556, + 723 + ], + "type": "text", + "content": "Evaluation. For image generation evaluation, we strictly follow the ADM setup [7]. We report generation quality using Fréchet inception distance (gFID) [17], structural FID (sFID) [31], inception score (IS) [42], precision (Prec.) and recall (Rec.) [25], measured on 50K generated images. For sampling, we follow the approach in SiT [30] and REPA [54], using the SDE Euler-Maruyama sampler with 250 steps. 
In terms of VAE benchmark, we measure the reconstruction FID (rFID) on 50K images from the Im" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 70, + 146, + 148 + ], + "blocks": [ + { + "bbox": [ + 69, + 70, + 146, + 148 + ], + "lines": [ + { + "bbox": [ + 69, + 70, + 146, + 148 + ], + "spans": [ + { + "bbox": [ + 69, + 70, + 146, + 148 + ], + "type": "image", + "image_path": "c0ccebd2a37cbf0f831d5a90307f3271837c5bd97b301ad920db8dddccafe7f4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 148, + 70, + 225, + 148 + ], + "blocks": [ + { + "bbox": [ + 148, + 70, + 225, + 148 + ], + "lines": [ + { + "bbox": [ + 148, + 70, + 225, + 148 + ], + "spans": [ + { + "bbox": [ + 148, + 70, + 225, + 148 + ], + "type": "image", + "image_path": "591dcd624ebe552ccf98780d13bf06ce9ec7c6a6452497a963591a1120d0e0bc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 227, + 70, + 304, + 148 + ], + "blocks": [ + { + "bbox": [ + 227, + 70, + 304, + 148 + ], + "lines": [ + { + "bbox": [ + 227, + 70, + 304, + 148 + ], + "spans": [ + { + "bbox": [ + 227, + 70, + 304, + 148 + ], + "type": "image", + "image_path": "98d5346a3f5d469ff855020e5e08533d39161900a58da1f5b6d2bc7be4a914b7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 306, + 70, + 383, + 148 + ], + "blocks": [ + { + "bbox": [ + 306, + 70, + 383, + 148 + ], + "lines": 
[ + { + "bbox": [ + 306, + 70, + 383, + 148 + ], + "spans": [ + { + "bbox": [ + 306, + 70, + 383, + 148 + ], + "type": "image", + "image_path": "fd12988f16d053f4c42208367c590281d1f1ccceeec126036badaef31a99e14a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 385, + 70, + 462, + 148 + ], + "blocks": [ + { + "bbox": [ + 385, + 70, + 462, + 148 + ], + "lines": [ + { + "bbox": [ + 385, + 70, + 462, + 148 + ], + "spans": [ + { + "bbox": [ + 385, + 70, + 462, + 148 + ], + "type": "image", + "image_path": "ffb775a5a42001303337822551611cf8813cb9ca3412dea2c0caff6206c92060.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 463, + 70, + 541, + 148 + ], + "blocks": [ + { + "bbox": [ + 463, + 70, + 541, + 148 + ], + "lines": [ + { + "bbox": [ + 463, + 70, + 541, + 148 + ], + "spans": [ + { + "bbox": [ + 463, + 70, + 541, + 148 + ], + "type": "image", + "image_path": "9b918e4e0af32f5170c59f334fc411b4925cc05d2d86c173ad1e1740e0c6ca05.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 69, + 148, + 146, + 226 + ], + "blocks": [ + { + "bbox": [ + 69, + 148, + 146, + 226 + ], + "lines": [ + { + "bbox": [ + 69, + 148, + 146, + 226 + ], + "spans": [ + { + "bbox": [ + 69, + 148, + 146, + 226 + ], + "type": "image", + "image_path": "732c2cd3f89282ebd0a31c3d19337f2d8bfc6478b1fdfeb81e104bd6b2f3b41d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 148, + 148, + 225, + 226 + ], + "blocks": [ + { + "bbox": [ + 148, + 148, + 225, + 226 + ], + "lines": [ + { + "bbox": [ + 148, + 148, + 225, + 226 + ], + "spans": [ + { + "bbox": [ + 148, + 148, + 225, + 226 + ], + "type": "image", + "image_path": 
"2578ec0c923a1161d476acd025b22fe4449057186fb959695c72d26185dc88cf.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 227, + 148, + 304, + 226 + ], + "blocks": [ + { + "bbox": [ + 227, + 148, + 304, + 226 + ], + "lines": [ + { + "bbox": [ + 227, + 148, + 304, + 226 + ], + "spans": [ + { + "bbox": [ + 227, + 148, + 304, + 226 + ], + "type": "image", + "image_path": "b04f9900754a787b98e10d234ab8b15d8eca65c470b50220f75f889f9a11f468.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 306, + 148, + 383, + 226 + ], + "blocks": [ + { + "bbox": [ + 306, + 148, + 383, + 226 + ], + "lines": [ + { + "bbox": [ + 306, + 148, + 383, + 226 + ], + "spans": [ + { + "bbox": [ + 306, + 148, + 383, + 226 + ], + "type": "image", + "image_path": "938d6a0dae447cad9ea39e63107549156e33218b9ca6a3565b0607e1df7efb1c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 385, + 148, + 462, + 226 + ], + "blocks": [ + { + "bbox": [ + 385, + 148, + 462, + 226 + ], + "lines": [ + { + "bbox": [ + 385, + 148, + 462, + 226 + ], + "spans": [ + { + "bbox": [ + 385, + 148, + 462, + 226 + ], + "type": "image", + "image_path": "70e94662b383073ac040caac999f97c380f6c9dac51ab4b893a0a6a3f105693c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 463, + 148, + 541, + 226 + ], + "blocks": [ + { + "bbox": [ + 463, + 148, + 541, + 226 + ], + "lines": [ + { + "bbox": [ + 463, + 148, + 541, + 226 + ], + "spans": [ + { + "bbox": [ + 463, + 148, + 541, + 226 + ], + "type": "image", + "image_path": "be5f24596e485bced0e6830cd32cfd126187b4d5cb069e03094ce1e4cef97c4d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": 
"image", + "bbox": [ + 69, + 228, + 146, + 304 + ], + "blocks": [ + { + "bbox": [ + 69, + 228, + 146, + 304 + ], + "lines": [ + { + "bbox": [ + 69, + 228, + 146, + 304 + ], + "spans": [ + { + "bbox": [ + 69, + 228, + 146, + 304 + ], + "type": "image", + "image_path": "23a834c33b92caf6f54237aa86afb54c1667eec855ce1c31c92f32512830e58c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 58, + 309, + 550, + 322 + ], + "lines": [ + { + "bbox": [ + 58, + 309, + 550, + 322 + ], + "spans": [ + { + "bbox": [ + 58, + 309, + 550, + 322 + ], + "type": "text", + "content": "Figure 5. Qualitative Results on Imagenet " + }, + { + "bbox": [ + 58, + 309, + 550, + 322 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 58, + 309, + 550, + 322 + ], + "type": "text", + "content": " using E2E-VAE and SiT-XL. We use a classifier-free guidance scale " + }, + { + "bbox": [ + 58, + 309, + 550, + 322 + ], + "type": "inline_equation", + "content": "\\alpha_{\\mathrm{cfg}} = 4.0" + }, + { + "bbox": [ + 58, + 309, + 550, + 322 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 148, + 227, + 225, + 304 + ], + "blocks": [ + { + "bbox": [ + 148, + 227, + 225, + 304 + ], + "lines": [ + { + "bbox": [ + 148, + 227, + 225, + 304 + ], + "spans": [ + { + "bbox": [ + 148, + 227, + 225, + 304 + ], + "type": "image", + "image_path": "10a3b80800ca4cbbe8a8383df426c38aec1dc6cebe1d8c22ac60df810291d921.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 227, + 227, + 304, + 304 + ], + "blocks": [ + { + "bbox": [ + 227, + 227, + 304, + 304 + ], + "lines": [ + { + "bbox": [ + 227, + 227, + 304, + 304 + ], + "spans": [ + { + "bbox": [ + 227, + 227, + 304, + 304 + ], + "type": "image", + "image_path": "e6f4ef56876b31e928da107740a6f9255035f413280dccaa59cbef9c0972b9c6.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 306, + 227, + 383, + 304 + ], + "blocks": [ + { + "bbox": [ + 306, + 227, + 383, + 304 + ], + "lines": [ + { + "bbox": [ + 306, + 227, + 383, + 304 + ], + "spans": [ + { + "bbox": [ + 306, + 227, + 383, + 304 + ], + "type": "image", + "image_path": "d01256bacff19b6a3cc55c94487b9b303cf3b4501268a5a38f314e22bf356833.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 385, + 227, + 462, + 304 + ], + "blocks": [ + { + "bbox": [ + 385, + 227, + 462, + 304 + ], + "lines": [ + { + "bbox": [ + 385, + 227, + 462, + 304 + ], + "spans": [ + { + "bbox": [ + 385, + 227, + 462, + 304 + ], + "type": "image", + "image_path": "04d75970ad292add98f1113716832632b5e8e141ef16b0ddb372ed0b172873c5.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 463, + 227, + 541, + 304 + ], + "blocks": [ + 
{ + "bbox": [ + 463, + 227, + 541, + 304 + ], + "lines": [ + { + "bbox": [ + 463, + 227, + 541, + 304 + ], + "spans": [ + { + "bbox": [ + 463, + 227, + 541, + 304 + ], + "type": "image", + "image_path": "3424b3de6794f2e2560bbb40c232b2c343909d6fe3029b8fca2169b1dde57471.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "table", + "bbox": [ + 57, + 345, + 294, + 510 + ], + "blocks": [ + { + "bbox": [ + 57, + 345, + 294, + 510 + ], + "lines": [ + { + "bbox": [ + 57, + 345, + 294, + 510 + ], + "spans": [ + { + "bbox": [ + 57, + 345, + 294, + 510 + ], + "type": "table", + "html": "
MethodTokenizerEpochsgFID↓sFID↓IS↑
Without End-to-End Tuning
MaskDiT [56]SD-VAE16005.6910.34177.9
DiT [34]14009.626.85121.5
SiT [30]14008.616.32131.7
FasterDiT [51]4007.915.45131.3
REPA [54]SD-VAE2019.406.0667.4
4011.106.0667.4
807.905.06122.6
8005.905.73157.8
With End-to-End Tuning (Ours)
REPA-ESD-VAE*2012.835.0488.8
407.174.39123.7
804.074.60161.8
", + "image_path": "ebd09afb994a91ca2cde8433cbeccb25b078c9df362e91163c7c0a9f19d05664.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 55, + 512, + 295, + 599 + ], + "lines": [ + { + "bbox": [ + 55, + 512, + 295, + 599 + ], + "spans": [ + { + "bbox": [ + 55, + 512, + 295, + 599 + ], + "type": "text", + "content": "Table 1. REPA-E for Accelerated Generation Performance. End-to-End training with REPA-E achieves significantly better performance (lower gFID) while using fewer epochs. Notably, REPA-E with only 80 epochs surpasses vanilla REPA using " + }, + { + "bbox": [ + 55, + 512, + 295, + 599 + ], + "type": "inline_equation", + "content": "10 \\times" + }, + { + "bbox": [ + 55, + 512, + 295, + 599 + ], + "type": "text", + "content": " epochs. * indicates that VAE is updated during end-to-end training. All results are w/o classifier-free guidance on ImageNet 256 × 256. Additional system-level comparisons with classifier-free guidance and state-of-the-art results are provided in Tab. 9." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 19 + }, + { + "bbox": [ + 55, + 609, + 273, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 609, + 273, + 620 + ], + "spans": [ + { + "bbox": [ + 55, + 609, + 273, + 620 + ], + "type": "text", + "content": "ageNet [6] validation set at a resolution of " + }, + { + "bbox": [ + 55, + 609, + 273, + 620 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 609, + 273, + 620 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 55, + 631, + 284, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 631, + 284, + 643 + ], + "spans": [ + { + "bbox": [ + 55, + 631, + 284, + 643 + ], + "type": "text", + "content": "4.2. 
Impact on Training Performance and Speed" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 55, + 649, + 295, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 649, + 295, + 685 + ], + "spans": [ + { + "bbox": [ + 55, + 649, + 295, + 685 + ], + "type": "text", + "content": "We first analyze the impact of end-to-end tuning using REPA-E (Sec. 3.2) for improving generation performance and speed when training latent-diffusion transformers." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 55, + 686, + 296, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 686, + 296, + 723 + ], + "spans": [ + { + "bbox": [ + 55, + 686, + 296, + 723 + ], + "type": "text", + "content": "Quantitative Evaluation. We compare REPA-E against various latent diffusion model (LDM) baselines in Tab. 1. We evaluate models of similar sizes (" + }, + { + "bbox": [ + 55, + 686, + 296, + 723 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 55, + 686, + 296, + 723 + ], + "type": "text", + "content": "675M parameters)" + } + ] + } + ], + "index": 24 + }, + { + "type": "table", + "bbox": [ + 317, + 345, + 550, + 434 + ], + "blocks": [ + { + "bbox": [ + 317, + 345, + 550, + 434 + ], + "lines": [ + { + "bbox": [ + 317, + 345, + 550, + 434 + ], + "spans": [ + { + "bbox": [ + 317, + 345, + 550, + 434 + ], + "type": "table", + "html": "
Diff. ModelgFID↓sFID↓IS↑Prec.↑Rec.↑
SiT-B (130M)49.57.0027.50.460.59
+REPA-E (Ours)34.86.3139.10.570.59
SiT-L (458M)24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
SiT-XL (675M)19.46.0667.40.640.61
+REPA-E (Ours)12.85.0488.80.710.58
", + "image_path": "9a812b0e81a6130477702ee4c4a9eb833570d2120becb8138fb64bcbdeb3bbb7.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "table_body" + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 437, + 554, + 472 + ], + "lines": [ + { + "bbox": [ + 313, + 437, + 554, + 472 + ], + "spans": [ + { + "bbox": [ + 313, + 437, + 554, + 472 + ], + "type": "text", + "content": "Table 2. Variation in Model-Scale. We find that REPA-E brings substantial performance improvements across all model-scales. All baselines are reported using vanilla-REPA [54] for training." + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 481, + 555, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 481, + 555, + 649 + ], + "spans": [ + { + "bbox": [ + 313, + 481, + 555, + 649 + ], + "type": "text", + "content": "on ImageNet " + }, + { + "bbox": [ + 313, + 481, + 555, + 649 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 481, + 555, + 649 + ], + "type": "text", + "content": " generation task. All results are reported without classifier-free guidance [18] using popular SiT-XL [30] model for training. 
We make two observations; 1) End-to-End tuning leads to faster training: consistently improving generation FID (gFID) from " + }, + { + "bbox": [ + 313, + 481, + 555, + 649 + ], + "type": "inline_equation", + "content": "19.40 \\rightarrow 12.83" + }, + { + "bbox": [ + 313, + 481, + 555, + 649 + ], + "type": "text", + "content": " (20 epochs), " + }, + { + "bbox": [ + 313, + 481, + 555, + 649 + ], + "type": "inline_equation", + "content": "11.10 \\rightarrow 7.17" + }, + { + "bbox": [ + 313, + 481, + 555, + 649 + ], + "type": "text", + "content": " (40 epochs), and " + }, + { + "bbox": [ + 313, + 481, + 555, + 649 + ], + "type": "inline_equation", + "content": "7.90 \\rightarrow 4.07" + }, + { + "bbox": [ + 313, + 481, + 555, + 649 + ], + "type": "text", + "content": " (80 epochs), even when comparing with REPA [54]. 2) End-to-End training leads to better final performance: REPA-E at 80 epochs surpasses FasterDiT [51] (" + }, + { + "bbox": [ + 313, + 481, + 555, + 649 + ], + "type": "inline_equation", + "content": "gFID = 7.91" + }, + { + "bbox": [ + 313, + 481, + 555, + 649 + ], + "type": "text", + "content": ") trained for 400 epochs and even MaskDiT [56], DiT [34], and SiT [30] which are trained over 1400 epochs. For instance, REPA-E reaches an FID of 4.07 within 400K steps, significantly boosting final performance over even REPA which only reaches a final FID for 5.9 after 4M steps [54]." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 313, + 651, + 554, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 651, + 554, + 723 + ], + "spans": [ + { + "bbox": [ + 313, + 651, + 554, + 723 + ], + "type": "text", + "content": "Qualitative Evaluation. We provide qualitative comparisons between REPA [54] and REPA-E in Fig. 4. 
We generate images from the same noise and label using checkpoints at " + }, + { + "bbox": [ + 313, + 651, + 554, + 723 + ], + "type": "inline_equation", + "content": "50\\mathrm{K}" + }, + { + "bbox": [ + 313, + 651, + 554, + 723 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 651, + 554, + 723 + ], + "type": "inline_equation", + "content": "100\\mathrm{K}" + }, + { + "bbox": [ + 313, + 651, + 554, + 723 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 651, + 554, + 723 + ], + "type": "inline_equation", + "content": "400\\mathrm{K}" + }, + { + "bbox": [ + 313, + 651, + 554, + 723 + ], + "type": "text", + "content": " training iterations, respectively. As seen in Fig. 4, we observe that REPA-E demonstrates superior image generation quality compared to the" + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 59, + 254, + 223 + ], + "blocks": [ + { + "bbox": [ + 56, + 59, + 254, + 223 + ], + "lines": [ + { + "bbox": [ + 56, + 59, + 254, + 223 + ], + "spans": [ + { + "bbox": [ + 56, + 59, + 254, + 223 + ], + "type": "image", + "image_path": "70ad31221b2f68af8bce71e05adc8b7df7169655277a3c20d0ba2992aa8a31cc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 226, + 240, + 237 + ], + "lines": [ + { + "bbox": [ + 67, + 226, + 240, + 237 + ], + "spans": [ + { + "bbox": [ + 67, + 226, + 240, + 237 + ], + "type": "text", + "content": "(a) PCA Visualization of Latent Space Structure [24]" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + 
"index": 0 + }, + { + "type": "image", + "bbox": [ + 273, + 59, + 553, + 222 + ], + "blocks": [ + { + "bbox": [ + 273, + 59, + 553, + 222 + ], + "lines": [ + { + "bbox": [ + 273, + 59, + 553, + 222 + ], + "spans": [ + { + "bbox": [ + 273, + 59, + 553, + 222 + ], + "type": "image", + "image_path": "77c564390a6c95f2c38a5d95ea756c20cec3d4210e716525a358c5d26a1aea66.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 275, + 225, + 552, + 237 + ], + "lines": [ + { + "bbox": [ + 275, + 225, + 552, + 237 + ], + "spans": [ + { + "bbox": [ + 275, + 225, + 552, + 237 + ], + "type": "text", + "content": "(b) Impact of End-to-End Tuning for Automatically Improving Latent Space Structure" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 239, + 555, + 316 + ], + "lines": [ + { + "bbox": [ + 55, + 239, + 555, + 316 + ], + "spans": [ + { + "bbox": [ + 55, + 239, + 555, + 316 + ], + "type": "text", + "content": "Figure 6. End-to-End Training Improves Latent Space Structure. (a) We observe that the latent space of pretrained VAEs can suffer either high noise components (e.g., SDXL-VAE, SD-VAE [40]), or, be over-smoothed and lack details (e.g., VA-VAE [50]). (b) The use of end-to-end tuning (" + }, + { + "bbox": [ + 55, + 239, + 555, + 316 + ], + "type": "inline_equation", + "content": "\\S 3.2" + }, + { + "bbox": [ + 55, + 239, + 555, + 316 + ], + "type": "text", + "content": ") automatically helps improve the latent space structure in a model-agnostic manner across different VAE architectures. For instance, similar to findings of concurrent work [44], we observe that SD-VAE suffers from high noise components in the latent space. Applying end-to-end training automatically helps adjust the latent space to reduce noise. In contrast, other VAEs such as recently proposed VA-VAE [50] suffer from an over-smoothed latent space. 
The use of end-to-end tuning with REPA-E automatically helps learn a more detailed latent-space structure to best support generation performance." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 58, + 327, + 293, + 440 + ], + "blocks": [ + { + "bbox": [ + 58, + 327, + 293, + 440 + ], + "lines": [ + { + "bbox": [ + 58, + 327, + 293, + 440 + ], + "spans": [ + { + "bbox": [ + 58, + 327, + 293, + 440 + ], + "type": "table", + "html": "
Target Repr.gFID↓sFID↓IS↑Prec.↑Rec.↑
I-JEPA-H [2]23.05.8160.30.620.60
+REPA-E (Ours)16.55.1873.60.680.60
CLIP-L [37]29.25.9846.40.590.61
+REPA-E (Ours)23.46.4457.10.620.60
DINOv2-B [33]24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
DINOv2-L [33]23.35.8959.90.610.60
+REPA-E (Ours)16.05.5977.70.680.58
", + "image_path": "4a22383a153c124e4af46b489760f62ae9e0676c1257c25773a561a405c1f621.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 58, + 494, + 293, + 583 + ], + "blocks": [ + { + "bbox": [ + 55, + 441, + 295, + 487 + ], + "lines": [ + { + "bbox": [ + 55, + 441, + 295, + 487 + ], + "spans": [ + { + "bbox": [ + 55, + 441, + 295, + 487 + ], + "type": "text", + "content": "Table 3. Variation in Representation Encoder. REPA-E yields consistent performance improvements across different choices for the representation-encoder used for representation-alignment [54]. All baselines are reported using vanilla-REPA [54] for training." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 494, + 293, + 583 + ], + "lines": [ + { + "bbox": [ + 58, + 494, + 293, + 583 + ], + "spans": [ + { + "bbox": [ + 58, + 494, + 293, + 583 + ], + "type": "table", + "html": "
AutoencodergFID↓sFID↓IS↑Prec.↑Rec.↑
SD-VAE [40]24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
IN-VAE (f16d32)22.75.4756.00.620.62
+REPA-E (Ours)12.75.5784.00.690.62
VA-VAE [50]12.86.4783.80.710.58
+REPA-E (Ours)11.15.3188.80.720.61
", + "image_path": "4a1821b1fab0a4d9974dccd6b9994ef489d78001f259fa1cdc2878fe9134dcfe.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 613, + 295, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 613, + 295, + 638 + ], + "spans": [ + { + "bbox": [ + 55, + 613, + 295, + 638 + ], + "type": "text", + "content": "REPA baseline, while also generating more structurally meaningful images during early stages of training process." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 645, + 275, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 645, + 275, + 658 + ], + "spans": [ + { + "bbox": [ + 55, + 645, + 275, + 658 + ], + "type": "text", + "content": "4.3. Generalization and Scalability of REPA-E" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 662, + 296, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 662, + 296, + 723 + ], + "spans": [ + { + "bbox": [ + 55, + 662, + 296, + 723 + ], + "type": "text", + "content": "We next analyze the generalization of the proposed approach to variation in training settings including model-size, tokenizer architecture, representation encoder, alignment depth [54] etc. Unless otherwise specified, all analysis and ablations use SiT-L [30] as the generative model," + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 317, + 327, + 550, + 416 + ], + "blocks": [ + { + "bbox": [ + 55, + 584, + 295, + 607 + ], + "lines": [ + { + "bbox": [ + 55, + 584, + 295, + 607 + ], + "spans": [ + { + "bbox": [ + 55, + 584, + 295, + 607 + ], + "type": "text", + "content": "Table 4. Variation in VAE Architecture. All baselines are reported using vanilla-REPA [54] for training." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 327, + 550, + 416 + ], + "lines": [ + { + "bbox": [ + 317, + 327, + 550, + 416 + ], + "spans": [ + { + "bbox": [ + 317, + 327, + 550, + 416 + ], + "type": "table", + "html": "
Aln. DepthgFID↓sFID↓IS↑Prec.↑Rec.↑
6th layer23.05.7259.20.620.60
+REPA-E (Ours)16.46.6474.30.670.59
8th layer24.16.2555.70.620.60
+REPA-E (Ours)16.35.6975.00.680.60
10th layer23.75.9156.90.620.60
+REPA-E (Ours)16.25.2274.70.680.58
", + "image_path": "adaae38fc3e6ec577cf064cdecf30feb67e4483e73d3134e2edd66601a68e38b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 315, + 458, + 552, + 518 + ], + "blocks": [ + { + "bbox": [ + 313, + 418, + 555, + 452 + ], + "lines": [ + { + "bbox": [ + 313, + 418, + 555, + 452 + ], + "spans": [ + { + "bbox": [ + 313, + 418, + 555, + 452 + ], + "type": "text", + "content": "Table 5. Variation in Alignment Depth. End-to-End tuning (REPA-E) gives consistent performance imrpoements over original REPA [54] across varying alignment-depths." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 315, + 458, + 552, + 518 + ], + "lines": [ + { + "bbox": [ + 315, + 458, + 552, + 518 + ], + "spans": [ + { + "bbox": [ + 315, + 458, + 552, + 518 + ], + "type": "table", + "html": "
ComponentgFID↓sFID↓IS↑Prec.↑Rec.↑
w/o stopgrad444.1460.31.490.000.00
w/o batch-norm18.15.3272.40.670.59
w/o LGAN19.26.4768.20.640.58
REPA-E (Ours)16.35.6975.00.680.60
", + "image_path": "5236958fea9f022969d3155f5426594fa733d4d77a23e4336c7f34abd68a2ea8.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 322, + 520, + 545, + 531 + ], + "lines": [ + { + "bbox": [ + 322, + 520, + 545, + 531 + ], + "spans": [ + { + "bbox": [ + 322, + 520, + 545, + 531 + ], + "type": "text", + "content": "Table 6. Ablation Study on Role of Different Components." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 541, + 555, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 541, + 555, + 613 + ], + "spans": [ + { + "bbox": [ + 313, + 541, + 555, + 613 + ], + "type": "text", + "content": "SD-VAE as the VAE, and DINOv2-B [33] as the pretrained vision model for REPA loss [54]. Default REPA alignment-depth of 8 is used. We train each variant for 100K iterations and report results without classifier-free guidance [18]. All baseline numbers are reported using vanilla REPA and compared with end-to-end training using REPA-E." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 614, + 556, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 614, + 556, + 723 + ], + "spans": [ + { + "bbox": [ + 313, + 614, + 556, + 723 + ], + "type": "text", + "content": "Impact of Model Size. Tab. 2 compares SiT-B, SiT-L, and SiT-XL to evaluate the effect of model size. We make two key observations. First, across all configurations, REPA-E consistently improves performance over the REPA baseline. 
Specifically, it reduces gFID from " + }, + { + "bbox": [ + 313, + 614, + 556, + 723 + ], + "type": "inline_equation", + "content": "49.5 \\rightarrow 34.8" + }, + { + "bbox": [ + 313, + 614, + 556, + 723 + ], + "type": "text", + "content": " for SiT-B, " + }, + { + "bbox": [ + 313, + 614, + 556, + 723 + ], + "type": "inline_equation", + "content": "24.1 \\rightarrow 16.3" + }, + { + "bbox": [ + 313, + 614, + 556, + 723 + ], + "type": "text", + "content": " for SiT-L, and " + }, + { + "bbox": [ + 313, + 614, + 556, + 723 + ], + "type": "inline_equation", + "content": "19.4 \\rightarrow 12.8" + }, + { + "bbox": [ + 313, + 614, + 556, + 723 + ], + "type": "text", + "content": " for SiT-XL, demonstrating the effectiveness. Second, surprisingly the percentage gains in gFID achieved with REPA-E (over REPA) improve with increasing model size. For in-" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 70, + 294, + 231 + ], + "blocks": [ + { + "bbox": [ + 56, + 70, + 294, + 231 + ], + "lines": [ + { + "bbox": [ + 56, + 70, + 294, + 231 + ], + "spans": [ + { + "bbox": [ + 56, + 70, + 294, + 231 + ], + "type": "table", + "html": "
MethodgFID↓sFID↓IS↑Prec.↑Rec.↑
100K Iterations (20 Epochs)
REPA [54]19.406.0667.40.640.61
REPA-E (scratch)14.127.8783.50.700.59
REPA-E (VAE init.)12.835.0488.80.710.58
200K Iterations (40 Epochs)
REPA [54]11.105.05100.40.690.64
REPA-E (scratch)7.546.17120.40.740.61
REPA-E (VAE init.)7.174.39123.70.740.62
400K Iterations (80 Epochs)
REPA [54]7.905.06122.60.700.65
REPA-E (scratch)4.344.44154.30.750.63
REPA-E (VAE init.)4.074.60161.80.760.62
", + "image_path": "5afd84a905078fcc6a268b92ea0c21c222abc451154a92f902aa8edef8767ab1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 55, + 233, + 294, + 300 + ], + "lines": [ + { + "bbox": [ + 55, + 233, + 294, + 300 + ], + "spans": [ + { + "bbox": [ + 55, + 233, + 294, + 300 + ], + "type": "text", + "content": "Table 7. End-to-End Training from Scratch. We find that while initializing the VAE with pretrained weights (SD-VAE [40]) helps slightly improve performance, REPA-E can be used to train both VAE and LDM from scratch in an end-to-end manner; still achieving significantly superior performance over REPA which requires a separate stage for training VAE in addition to LDM training." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 314, + 294, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 314, + 294, + 387 + ], + "spans": [ + { + "bbox": [ + 55, + 314, + 294, + 387 + ], + "type": "text", + "content": "stance, for SiT-B model REPA-E leads to a " + }, + { + "bbox": [ + 55, + 314, + 294, + 387 + ], + "type": "inline_equation", + "content": "29.6\\%" + }, + { + "bbox": [ + 55, + 314, + 294, + 387 + ], + "type": "text", + "content": " improvement in gFID over REPA. Surprisingly even more gains are achieved for bigger models improving gFID by " + }, + { + "bbox": [ + 55, + 314, + 294, + 387 + ], + "type": "inline_equation", + "content": "32.3\\%" + }, + { + "bbox": [ + 55, + 314, + 294, + 387 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 314, + 294, + 387 + ], + "type": "inline_equation", + "content": "34.0\\%" + }, + { + "bbox": [ + 55, + 314, + 294, + 387 + ], + "type": "text", + "content": " for SiT-L and SiT-XL models respectively. This trend highlights the scalability of REPA-E; larger models achieve better percentage gains over vanilla-REPA." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 391, + 294, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 391, + 294, + 486 + ], + "spans": [ + { + "bbox": [ + 55, + 391, + 294, + 486 + ], + "type": "text", + "content": "Variation in Representation Encoder. We report results across different perception model encoders (CLIP-L, I-JEPA-H, DINOv2-B, and DINOv2-L) Tab. 3. We observe that REPA-E gives consistent performance improvements over REPA, across different choices of the perceptual encoder model. In particular, with DINOv2-B and DINOv2-L, REPA-E significantly reduces gFID from " + }, + { + "bbox": [ + 55, + 391, + 294, + 486 + ], + "type": "inline_equation", + "content": "24.1 \\rightarrow 16.3" + }, + { + "bbox": [ + 55, + 391, + 294, + 486 + ], + "type": "text", + "content": " and from " + }, + { + "bbox": [ + 55, + 391, + 294, + 486 + ], + "type": "inline_equation", + "content": "23.3 \\rightarrow 16.0" + }, + { + "bbox": [ + 55, + 391, + 294, + 486 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 491, + 295, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 491, + 295, + 634 + ], + "spans": [ + { + "bbox": [ + 55, + 491, + 295, + 634 + ], + "type": "text", + "content": "Variation in VAE. Tab. 4 evaluates the impact of different VAEs on REPA-E performance. In particular, we report results using three different VAEs 1) SD-VAE [1], 2) VA-VAE [50] and 3) IN-VAE (a " + }, + { + "bbox": [ + 55, + 491, + 295, + 634 + ], + "type": "inline_equation", + "content": "16\\times" + }, + { + "bbox": [ + 55, + 491, + 295, + 634 + ], + "type": "text", + "content": " downsampling, 32-channel VAE trained on ImageNet [6] using official training code from [40]). Across all variations, REPA-E consistently improves performance over the REPA baseline. 
REPA-E reduces gFID from " + }, + { + "bbox": [ + 55, + 491, + 295, + 634 + ], + "type": "inline_equation", + "content": "24.1\\rightarrow 16.3" + }, + { + "bbox": [ + 55, + 491, + 295, + 634 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 55, + 491, + 295, + 634 + ], + "type": "inline_equation", + "content": "22.7\\rightarrow 12.7" + }, + { + "bbox": [ + 55, + 491, + 295, + 634 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 491, + 295, + 634 + ], + "type": "inline_equation", + "content": "12.8\\rightarrow 11.1" + }, + { + "bbox": [ + 55, + 491, + 295, + 634 + ], + "type": "text", + "content": " for SD-VAE, IN-VAE and VA-VAE, respectively. The results demonstrate that REPA-E robustly improves generative quality across diverse variations in architecture, pretraining dataset and training setting of the VAE." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 639, + 295, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 639, + 295, + 723 + ], + "spans": [ + { + "bbox": [ + 55, + 639, + 295, + 723 + ], + "type": "text", + "content": "Variation in Alignment Depth. Tab. 5 investigates the effect of applying the alignment loss at different layers the diffusion model. 
We observe that REPA-E consistently enhances generation quality over the REPA baseline across variation in choice of alignment depth; with gFID improving from " + }, + { + "bbox": [ + 55, + 639, + 295, + 723 + ], + "type": "inline_equation", + "content": "23.0 \\rightarrow 16.4" + }, + { + "bbox": [ + 55, + 639, + 295, + 723 + ], + "type": "text", + "content": " (6th layer), " + }, + { + "bbox": [ + 55, + 639, + 295, + 723 + ], + "type": "inline_equation", + "content": "24.1 \\rightarrow 16.3" + }, + { + "bbox": [ + 55, + 639, + 295, + 723 + ], + "type": "text", + "content": " (8th layer), and " + }, + { + "bbox": [ + 55, + 639, + 295, + 723 + ], + "type": "inline_equation", + "content": "23.7 \\rightarrow 16.2" + }, + { + "bbox": [ + 55, + 639, + 295, + 723 + ], + "type": "text", + "content": " (10th layer)." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 316, + 70, + 553, + 221 + ], + "blocks": [ + { + "bbox": [ + 316, + 70, + 553, + 221 + ], + "lines": [ + { + "bbox": [ + 316, + 70, + 553, + 221 + ], + "spans": [ + { + "bbox": [ + 316, + 70, + 553, + 221 + ], + "type": "table", + "html": "
VAEDiffusion modelREPAgFID-50K
SD-VAE [40]DiT-XL [34]19.82
VA-VAE [50]DiT-XL [34]6.74
E2E-VAE (Ours)DiT-XL [34]6.75
SD-VAE [40]SiT-XL [30]17.20
VA-VAE [50]SiT-XL [30]5.93
E2E-VAE (Ours)SiT-XL [30]5.26
SD-VAE [40]DiT-XL [34]12.29
VA-VAE [50]DiT-XL [34]4.71
E2E-VAE (Ours)DiT-XL [34]4.20
SD-VAE [40]SiT-XL [30]7.90
VA-VAE [50]SiT-XL [30]4.88
E2E-VAE (Ours)SiT-XL [30]3.46
", + "image_path": "3e1b51168691e30ece2a426a5f9344b7b39df1ab9f2ca7cae0527e56816b6325.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 313, + 223, + 553, + 300 + ], + "lines": [ + { + "bbox": [ + 313, + 223, + 553, + 300 + ], + "spans": [ + { + "bbox": [ + 313, + 223, + 553, + 300 + ], + "type": "text", + "content": "Table 8. Impact of End-to-End Tuning on VAE Performance. We find that once tuned using REPA-E, the finetuned VAEs can be used as a drop-in replacement for their original counterparts offering significantly accelerated generation performance. We fix all the VAEs and only train the diffusion models (with and w/o REPA). E2E-VAE is obtained from REPA-E fine-tuning (VA-VAE + SiT-XL). All results are reported at 80 epochs (400K iterations)." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 308, + 553, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 308, + 553, + 452 + ], + "spans": [ + { + "bbox": [ + 313, + 308, + 553, + 452 + ], + "type": "text", + "content": "Ablation on Design Components. We also perform ablation studies analyzing the importance of each component discussed in Sec. 3.2. Results are shown in Tab. 6. We observe that each component plays a key role in the final performance for REPA-E. In particular, we observe that the stop-grad operation on the diffusion loss helps prevent degradation of the latent-space structure. Similarly, the use of batch norm is useful adaptively normalizing the latent-statistics and helps improve the gFID from " + }, + { + "bbox": [ + 313, + 308, + 553, + 452 + ], + "type": "inline_equation", + "content": "18.09 \\rightarrow 16.3" + }, + { + "bbox": [ + 313, + 308, + 553, + 452 + ], + "type": "text", + "content": ". 
Similarly, the regularization losses play a key role in maintaining the reconstruction performance of the finetuned VAE, thereby improving the gFID from " + }, + { + "bbox": [ + 313, + 308, + 553, + 452 + ], + "type": "inline_equation", + "content": "19.07 \\rightarrow 16.3" + }, + { + "bbox": [ + 313, + 308, + 553, + 452 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 453, + 553, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 453, + 553, + 596 + ], + "spans": [ + { + "bbox": [ + 313, + 453, + 553, + 596 + ], + "type": "text", + "content": "End-to-End Training from Scratch. We next analyze the impact of VAE initialization on end-to-end training. As shown in Tab. 7, we find that while initializing the VAE from pretrained weights helps slightly improve performance, REPA-E can be used to train both VAE and LDM from scratch still achieving superior performance over REPA, which technically requires a separate stage for VAE training in addition to LDM training. For instance, while REPA achieves a FID of 5.90 after 4M iterations, REPA-E while training entirely from scratch (for both VAE and LDM) achieves much faster and better generation FID of 4.34 within just 400K iterations." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 607, + 515, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 607, + 515, + 620 + ], + "spans": [ + { + "bbox": [ + 313, + 607, + 515, + 620 + ], + "type": "text", + "content": "4.4. Impact of End-to-End Tuning on VAE" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 625, + 553, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 625, + 553, + 697 + ], + "spans": [ + { + "bbox": [ + 313, + 625, + 553, + 697 + ], + "type": "text", + "content": "We next analyze the impact of end-to-end tuning on the VAE. 
In particular, we first show that end-to-end tuning improves the latent-space structure (Fig. 6). We next show that once tuned using REPA-E, the finetuned VAEs can be used as a drop-in replacement for their original counterparts offering significantly improved generation performance." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 699, + 553, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 699, + 553, + 723 + ], + "spans": [ + { + "bbox": [ + 313, + 699, + 553, + 723 + ], + "type": "text", + "content": "End-to-End Training improves Latent Space Structure. Results are shown in Fig. 6. Following [24], we visu" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 55, + 69, + 555, + 301 + ], + "blocks": [ + { + "bbox": [ + 55, + 69, + 555, + 301 + ], + "lines": [ + { + "bbox": [ + 55, + 69, + 555, + 301 + ], + "spans": [ + { + "bbox": [ + 55, + 69, + 555, + 301 + ], + "type": "table", + "html": "
TokenizerMethodTraining Epoches#paramsrFID↓Generation w/o CFGGeneration w/ CFG
gFID↓sFID↓IS↑Prec.↑Rec.↑gFID↓sFID↓IS↑Prec.↑Rec.↑
AutoRegressive (AR)
MaskGiTMaskGIT [4]555227M2.286.18-182.10.800.51-----
VQGANLlamaGen [45]3003.1B0.599.388.24112.90.690.672.185.97263.30.810.58
VQVAEVAR [46]3502.0B------1.80-365.40.830.57
LFQ tokenizersMagViT-v2 [52]1080307M1.503.65-200.5--1.78-319.4--
LDMMAR [27]800945M0.532.35-227.80.790.621.55-303.70.810.62
Latent Diffusion Models (LDM)
SD-VAE [40]MaskDiT [56]1600675M0.615.6910.34177.90.740.602.285.67276.60.800.61
DiT [34]1400675M9.626.85121.50.670.672.274.60278.20.830.57
SiT [30]1400675M8.616.32131.70.680.672.064.50270.30.820.59
FasterDiT [51]400675M7.915.45131.30.670.692.034.63264.00.810.60
MDT [12]1300675M6.235.23143.00.710.651.794.57283.00.810.61
MDTv2 [13]1080675M-----1.584.52314.70.790.65
Representation Alignment Methods
VA-VAE [50]LightningDiT [50]80675M0.284.29---------
800675M2.054.37207.70.770.661.254.15295.30.800.65
SD-VAEREPA [54]80675M0.617.905.06122.60.700.65-----
800675M5.845.79158.70.700.681.284.68305.70.790.64
E2E-VAE (Ours)REPA80675M0.283.464.17159.80.770.631.674.12266.30.800.63
800675M1.694.17219.30.770.671.124.09302.90.790.66
", + "image_path": "3bd08f1e01a8a2b5250db10979a050e6a8557330b32383b5d72fc672ca983201.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 308, + 555, + 376 + ], + "lines": [ + { + "bbox": [ + 55, + 308, + 555, + 376 + ], + "spans": [ + { + "bbox": [ + 55, + 308, + 555, + 376 + ], + "type": "text", + "content": "Table 9. System-Level Performance on ImageNet " + }, + { + "bbox": [ + 55, + 308, + 555, + 376 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 308, + 555, + 376 + ], + "type": "text", + "content": " comparing our end-to-end tuned VAE (E2E-VAE) with other VAEs for traditional LDM training. Note that all representation alignment methods at 800 epochs are evaluated using a class-balanced sampling protocol, as detailed in App. C. We observe that in addition to improving VAE latent space structure (Fig. 6), end-to-end tuning significantly improves VAE downstream generation performance. Once tuned using REPA-E, the improved VAE can be used as drop-in replacement for their original counterparts for accelerated generation performance. Overall, our approach helps improve both LDM and VAE performance — achieving a new state-of-the-art FID of 1.12 and 0.28, respectively for LDM generation and VAE reconstruction performance." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 54, + 396, + 294, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 396, + 294, + 576 + ], + "spans": [ + { + "bbox": [ + 54, + 396, + 294, + 576 + ], + "type": "text", + "content": "alize latent space structure using principal component analysis (PCA) that projects them to three channels colored by RGB. 
We consider three different VAEs: 1) SD-VAE [40], 2) IN-VAE (a " + }, + { + "bbox": [ + 54, + 396, + 294, + 576 + ], + "type": "inline_equation", + "content": "16\\times" + }, + { + "bbox": [ + 54, + 396, + 294, + 576 + ], + "type": "text", + "content": " downsampling, 32-channel VAE trained on ImageNet [6]). 3) VA-VAE from recent work from [50]. We observe that end-to-end tuning using REPA-E automatically improves the latent space structure of the original VAE. For instance, similar to findings of concurrent work [44], we observe that SD-VAE suffers from high noise components in the latent space. Applying end-to-end training automatically helps adjust the latent space to learn reduce noise. In contrast, other VAEs such as recently proposed VA-VAE [50] suffer from over-smother latent space. Application of E2E tuning automatically helps learn a more detailed latent-space to best support generation performance." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 578, + 295, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 578, + 295, + 723 + ], + "spans": [ + { + "bbox": [ + 55, + 578, + 295, + 723 + ], + "type": "text", + "content": "End-to-End Training Improves VAE Performance. We next evaluate the impact of end-to-end tuning on downstream generation performance of the VAE. To this end, we first use end-to-end tuning for finetuning the recently proposed VA-VAE [50]. We then use the resulting end-to-end finetuned-VAE (named E2E-VAE), and compare its downstream generation performance with current state-of-the-art VAEs; including SDVAE [40] and VA-VAE [50]. To do this, we conduct traditional latent diffusion model training (w/o REPA-E), where only the generator network is updated while keeping the VAE frozen. Tab. 
8 shows the comparison of VAE downstream generation across diverse train" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 313, + 396, + 555, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 396, + 555, + 482 + ], + "spans": [ + { + "bbox": [ + 313, + 396, + 555, + 482 + ], + "type": "text", + "content": "ing settings. We observe that end-to-end tuned VAEs consistently outperform their original counterparts for downstream generation tasks across variations in LDM architecture and training settings. Interestingly, we observe that a VAE tuned using SiT-XL yields performance improvements even when using a different LDM architecture such as DiT-XL; thereby demonstrating the robustness of our approach." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 314, + 523, + 388, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 523, + 388, + 537 + ], + "spans": [ + { + "bbox": [ + 314, + 523, + 388, + 537 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 555, + 556, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 555, + 556, + 723 + ], + "spans": [ + { + "bbox": [ + 313, + 555, + 556, + 723 + ], + "type": "text", + "content": "\"Can we unlock VAE's for performing end-to-end training with latent diffusion transformers?\" Directly backpropagating diffusion loss to the VAE is ineffective and even degrages final performance. We show that while diffusion loss is ineffective, end-to-end training can be unlocked using REPA loss. 
Our end-to-end training recipe (REPA-E), significantly improves latent-space structure, shows remarkable performance; speeding up diffusion model training by over " + }, + { + "bbox": [ + 313, + 555, + 556, + 723 + ], + "type": "inline_equation", + "content": "17 \\times" + }, + { + "bbox": [ + 313, + 555, + 556, + 723 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 555, + 556, + 723 + ], + "type": "inline_equation", + "content": "45 \\times" + }, + { + "bbox": [ + 313, + 555, + 556, + 723 + ], + "type": "text", + "content": " over REPA and vanilla training recipes. Overall, our approach achieves a new state-of-the-art results with generation FID of 1.12 and 1.69 with and without use of classifier-free guidance. We hope that our work can help foster further research for enabling end-to-end training with latent diffusion transformers." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 153, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 153, + 85 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 153, + 85 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 297, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 297, + 191 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 297, + 191 + ], + "type": "text", + "content": "We would like to extend our deepest appreciation to Zeyu Zhang, Qinyu Zhao, and Zhanhao Liang for insightful discussions. 
We would also like to thank all reviewers for their constructive feedback. This work was supported in part by the Australian Research Council under Discovery Project DP210102801 and Future Fellowship FT240100820. SX acknowledges support from the OpenPath AI Foundation, IITP grant funded by the Korean Government (MSIT) (No. RS-2024-00457882) and NSF Award IIS-2443404." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 215, + 115, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 215, + 115, + 227 + ], + "spans": [ + { + "bbox": [ + 56, + 215, + 115, + 227 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 235, + 296, + 723 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 61, + 235, + 294, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 235, + 294, + 267 + ], + "spans": [ + { + "bbox": [ + 61, + 235, + 294, + 267 + ], + "type": "text", + "content": "[1] Stability AI. Improved autoencoders ... https://huggingface.co/stabilityyai/sd-vae-ft-mse, n.d. Accessed: April 11, 2025. 5, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 270, + 296, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 270, + 296, + 335 + ], + "spans": [ + { + "bbox": [ + 61, + 270, + 296, + 335 + ], + "type": "text", + "content": "[2] Mahmoud Assran, Quentin Duval, Ishan Misra, Piotr Bojanowski, Pascal Vincent, Michael Rabbat, Yann LeCun, and Nicolas Ballas. Self-supervised learning from images with a joint-embedding predictive architecture. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15619–15629, 2023. 
7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 337, + 296, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 337, + 296, + 370 + ], + "spans": [ + { + "bbox": [ + 62, + 337, + 296, + 370 + ], + "type": "text", + "content": "[3] Dana H Ballard. Modular learning in neural networks. In Proceedings of the sixth National conference on Artificial intelligence-Volume 1, pages 279-284, 1987. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 372, + 296, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 372, + 296, + 425 + ], + "spans": [ + { + "bbox": [ + 62, + 372, + 296, + 425 + ], + "type": "text", + "content": "[4] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 9" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 428, + 296, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 428, + 296, + 483 + ], + "spans": [ + { + "bbox": [ + 62, + 428, + 296, + 483 + ], + "type": "text", + "content": "[5] Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. arXiv preprint arXiv:2310.00426, 2023. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 484, + 296, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 484, + 296, + 538 + ], + "spans": [ + { + "bbox": [ + 62, + 484, + 296, + 538 + ], + "type": "text", + "content": "[6] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 
2, 5, 6, 8, 9, 13" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 541, + 296, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 541, + 296, + 574 + ], + "spans": [ + { + "bbox": [ + 62, + 541, + 296, + 574 + ], + "type": "text", + "content": "[7] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 576, + 296, + 652 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 576, + 296, + 652 + ], + "spans": [ + { + "bbox": [ + 62, + 576, + 296, + 652 + ], + "type": "text", + "content": "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 654, + 296, + 699 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 654, + 296, + 699 + ], + "spans": [ + { + "bbox": [ + 62, + 654, + 296, + 699 + ], + "type": "text", + "content": "[9] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12873-12883, 2021. 
3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 700, + 296, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 700, + 296, + 723 + ], + "spans": [ + { + "bbox": [ + 57, + 700, + 296, + 723 + ], + "type": "text", + "content": "[10] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 723 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 117 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 117 + ], + "type": "text", + "content": "Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. 3, 13" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 118, + 553, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 118, + 553, + 173 + ], + "spans": [ + { + "bbox": [ + 316, + 118, + 553, + 173 + ], + "type": "text", + "content": "[11] Peng Gao, Le Zhuo, Ziyi Lin, Chris Liu, Junsong Chen, Ruoyi Du, Enze Xie, Xu Luo, Longtian Qiu, Yuhang Zhang, et al. Lumina-t2x: Transforming text into any modality, resolution, and duration via flow-based large diffusion transformers. arXiv preprint arXiv:2405.05945, 2024. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 175, + 553, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 175, + 553, + 228 + ], + "spans": [ + { + "bbox": [ + 316, + 175, + 553, + 228 + ], + "type": "text", + "content": "[12] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Masked diffusion transformer is a strong image synthesizer. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23164-23173, 2023. 9" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 230, + 553, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 230, + 553, + 272 + ], + "spans": [ + { + "bbox": [ + 316, + 230, + 553, + 272 + ], + "type": "text", + "content": "[13] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Mdtv2: Masked diffusion transformer is a strong image synthesizer. arXiv preprint arXiv:2303.14389, 2023. 9" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 274, + 553, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 274, + 553, + 305 + ], + "spans": [ + { + "bbox": [ + 316, + 274, + 553, + 305 + ], + "type": "text", + "content": "[14] Ross Girshick. Fast r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 1440-1448, 2015. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 308, + 553, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 308, + 553, + 361 + ], + "spans": [ + { + "bbox": [ + 316, + 308, + 553, + 361 + ], + "type": "text", + "content": "[15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 580-587, 2014. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 364, + 553, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 364, + 553, + 407 + ], + "spans": [ + { + "bbox": [ + 316, + 364, + 553, + 407 + ], + "type": "text", + "content": "[16] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 
3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 409, + 553, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 409, + 553, + 462 + ], + "spans": [ + { + "bbox": [ + 316, + 409, + 553, + 462 + ], + "type": "text", + "content": "[17] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 464, + 553, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 464, + 553, + 486 + ], + "spans": [ + { + "bbox": [ + 316, + 464, + 553, + 486 + ], + "type": "text", + "content": "[18] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 6, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 487, + 553, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 487, + 553, + 520 + ], + "spans": [ + { + "bbox": [ + 316, + 487, + 553, + 520 + ], + "type": "text", + "content": "[19] Minyoung Huh, Brian Cheung, Tongzhou Wang, and Phillip Isola. The platonic representation hypothesis. In International Conference on Machine Learning, 2024. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 521, + 553, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 521, + 553, + 564 + ], + "spans": [ + { + "bbox": [ + 316, + 521, + 553, + 564 + ], + "type": "text", + "content": "[20] Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International conference on machine learning, pages 448-456. pmlr, 2015. 
4, 5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 566, + 553, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 566, + 553, + 620 + ], + "spans": [ + { + "bbox": [ + 316, + 566, + 553, + 620 + ], + "type": "text", + "content": "[21] Dongwon Kim, Ju He, Qihang Yu, Chenglin Yang, Xiaohui Shen, Suha Kwak, and Liang-Chieh Chen. Democratizing text-to-image masked generative models with compact text-aware one-dimensional tokens. arXiv preprint arXiv:2501.07730, 2025. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 622, + 553, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 622, + 553, + 643 + ], + "spans": [ + { + "bbox": [ + 316, + 622, + 553, + 643 + ], + "type": "text", + "content": "[22] Diederik P Kingma. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. 1, 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 645, + 553, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 645, + 553, + 677 + ], + "spans": [ + { + "bbox": [ + 316, + 645, + 553, + 677 + ], + "type": "text", + "content": "[23] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 678, + 553, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 678, + 553, + 723 + ], + "spans": [ + { + "bbox": [ + 316, + 678, + 553, + 723 + ], + "type": "text", + "content": "[24] Theodoros Kouzelis, Ioannis Kakogeorgiou, Spyros Gidaris, and Nikos Komodakis. Eq-vae: Equivalence regularized latent space for improved generative image modeling. arXiv preprint arXiv:2502.09509, 2025. 
2, 3, 7, 8" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 722 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 117 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 117 + ], + "type": "text", + "content": "[25] Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in neural information processing systems, 32, 2019. 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 294, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 294, + 139 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 294, + 139 + ], + "type": "text", + "content": "[26] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024.3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 140, + 294, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 140, + 294, + 184 + ], + "spans": [ + { + "bbox": [ + 56, + 140, + 294, + 184 + ], + "type": "text", + "content": "[27] Tianhong Li, Yonglong Tian, He Li, Mingyang Deng, and Kaiming He. Autoregressive image generation without vector quantization. Advances in Neural Information Processing Systems, 37:56424-56445, 2025. 
9, 14" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 186, + 294, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 186, + 294, + 240 + ], + "spans": [ + { + "bbox": [ + 56, + 186, + 294, + 240 + ], + "type": "text", + "content": "[28] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 13" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 241, + 294, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 241, + 294, + 263 + ], + "spans": [ + { + "bbox": [ + 56, + 241, + 294, + 263 + ], + "type": "text", + "content": "[29] I Loshchilov. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 264, + 294, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 264, + 294, + 330 + ], + "spans": [ + { + "bbox": [ + 56, + 264, + 294, + 330 + ], + "type": "text", + "content": "[30] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable interpolant transformers. In European Conference on Computer Vision, pages 23-40. Springer, 2024. 2, 3, 5, 6, 7, 8, 9, 13" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 331, + 294, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 331, + 294, + 374 + ], + "spans": [ + { + "bbox": [ + 56, + 331, + 294, + 374 + ], + "type": "text", + "content": "[31] Charlie Nash, Jacob Menick, Sander Dieleman, and Peter Battaglia. Generating images with sparse representations. In International Conference on Machine Learning, pages 7958-7968. PMLR, 2021. 
5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 376, + 291, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 376, + 291, + 387 + ], + "spans": [ + { + "bbox": [ + 56, + 376, + 291, + 387 + ], + "type": "text", + "content": "[32] OpenAI. Sora. https://openai.com/sora, 2024.3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 388, + 294, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 388, + 294, + 453 + ], + "spans": [ + { + "bbox": [ + 56, + 388, + 294, + 453 + ], + "type": "text", + "content": "[33] Maxime Oquab, Timothee Darct, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. Transactions on Machine Learning Research Journal, pages 1-31, 2024. 4, 5, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 455, + 294, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 455, + 294, + 498 + ], + "spans": [ + { + "bbox": [ + 56, + 455, + 294, + 498 + ], + "type": "text", + "content": "[34] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 3, 6, 8, 9" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 500, + 294, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 500, + 294, + 554 + ], + "spans": [ + { + "bbox": [ + 56, + 500, + 294, + 554 + ], + "type": "text", + "content": "[35] Pablo Pernias, Dominic Rampas, Mats Leon Richter, Christopher Pal, and Marc Aubreville. Würstchen: An efficient architecture for large-scale text-to-image diffusion models. In The Twelfth International Conference on Learning Representations, 2023. 
3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 555, + 294, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 555, + 294, + 609 + ], + "spans": [ + { + "bbox": [ + 56, + 555, + 294, + 609 + ], + "type": "text", + "content": "[36] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. SDXL: Improving latent diffusion models for high-resolution image synthesis. In The Twelfth International Conference on Learning Representations, 2024. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 611, + 294, + 676 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 611, + 294, + 676 + ], + "spans": [ + { + "bbox": [ + 56, + 611, + 294, + 676 + ], + "type": "text", + "content": "[37] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 7" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 678, + 294, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 678, + 294, + 722 + ], + "spans": [ + { + "bbox": [ + 56, + 678, + 294, + 722 + ], + "type": "text", + "content": "[38] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. IEEE transactions on pattern analysis and machine intelligence, 39(6):1137-1149, 2016. 
1" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 555, + 721 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 316, + 72, + 554, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 72, + 554, + 116 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 554, + 116 + ], + "type": "text", + "content": "[39] Sucheng Ren, Qihang Yu, Ju He, Xiaohui Shen, Alan Yuille, and Liang-Chieh Chen. Beyond next-token: Next-x prediction for autoregressive visual generation. arXiv preprint arXiv:2502.20388, 2025. 14" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 118, + 555, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 118, + 555, + 182 + ], + "spans": [ + { + "bbox": [ + 316, + 118, + 555, + 182 + ], + "type": "text", + "content": "[40] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 1, 2, 3, 4, 5, 7, 8, 9, 13, 14" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 183, + 554, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 183, + 554, + 215 + ], + "spans": [ + { + "bbox": [ + 316, + 183, + 554, + 215 + ], + "type": "text", + "content": "[41] Leonid I. Rudin, Stanley Osher, and Emad Fatemi. Nonlinear total variation based noise removal algorithms. Physica D: Nonlinear Phenomena, 60(1):259-268, 1992. 13" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 217, + 554, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 217, + 554, + 258 + ], + "spans": [ + { + "bbox": [ + 316, + 217, + 554, + 258 + ], + "type": "text", + "content": "[42] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. 
Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 260, + 554, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 260, + 554, + 293 + ], + "spans": [ + { + "bbox": [ + 316, + 260, + 554, + 293 + ], + "type": "text", + "content": "[43] Jaskirat Singh, Stephen Gould, and Liang Zheng. High-fidelity guided image synthesis with latent diffusion models. arXiv preprint arXiv:2211.17084, 2022. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 293, + 554, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 293, + 554, + 336 + ], + "spans": [ + { + "bbox": [ + 316, + 293, + 554, + 336 + ], + "type": "text", + "content": "[44] Ivan Skorokhodov, Sharath Girish, Benran Hu, Willi Menapace, Yanyu Li, Rameen Abdal, Sergey Tulyakov, and Aliaksandr Siarohin. Improving the diffusability of autoencoders. arXiv preprint arXiv:2502.14831, 2025. 2, 7, 9" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 338, + 554, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 338, + 554, + 380 + ], + "spans": [ + { + "bbox": [ + 316, + 338, + 554, + 380 + ], + "type": "text", + "content": "[45] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 9" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 381, + 554, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 381, + 554, + 425 + ], + "spans": [ + { + "bbox": [ + 316, + 381, + 554, + 425 + ], + "type": "text", + "content": "[46] Keyu Tian, Yi Jiang, Zehuan Yuan, Bingyue Peng, and Liwei Wang. Visual autoregressive modeling: Scalable image generation via next-scale prediction. 
Advances in neural information processing systems, 37:84839-84865, 2025. 9" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 426, + 554, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 426, + 554, + 468 + ], + "spans": [ + { + "bbox": [ + 316, + 426, + 554, + 468 + ], + "type": "text", + "content": "[47] Arash Vahdat, Karsten Kreis, and Jan Kautz. Score-based generative modeling in latent space. In Advances in Neural Information Processing Systems, pages 11287-11302. Curran Associates, Inc., 2021. 3, 13" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 470, + 554, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 470, + 554, + 502 + ], + "spans": [ + { + "bbox": [ + 316, + 470, + 554, + 502 + ], + "type": "text", + "content": "[48] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 502, + 554, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 502, + 554, + 534 + ], + "spans": [ + { + "bbox": [ + 316, + 502, + 554, + 534 + ], + "type": "text", + "content": "[49] Shuai Wang, Zhi Tian, Weilin Huang, and Limin Wang. Ddt: Decoupled diffusion transformer. arXiv preprint arXiv:2504.05741, 2025. 14" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 536, + 554, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 536, + 554, + 567 + ], + "spans": [ + { + "bbox": [ + 316, + 536, + 554, + 567 + ], + "type": "text", + "content": "[50] Jingfeng Yao and Xinggang Wang. Reconstruction vs. generation: Taming optimization dilemma in latent diffusion models. arXiv preprint arXiv:2501.01423, 2025. 
3, 7, 8, 9, 14" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 568, + 554, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 568, + 554, + 611 + ], + "spans": [ + { + "bbox": [ + 316, + 568, + 554, + 611 + ], + "type": "text", + "content": "[51] Jingfeng Yao, Wang Cheng, Wenyu Liu, and Xinggang Wang. Fasteredit: Towards faster diffusion transformers training without architecture modification. arXiv preprint arXiv:2410.10356, 2024. 6, 9" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 613, + 554, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 613, + 554, + 666 + ], + "spans": [ + { + "bbox": [ + 316, + 613, + 554, + 666 + ], + "type": "text", + "content": "[52] Lijun Yu, José Lezama, Nitesh B Gundavarapu, Luca Versari, Kihyuk Sohn, David Minnen, Yong Cheng, Vighnesh Birodkar, Agrim Gupta, Xiuye Gu, et al. Language model beats diffusion-tokenizer is key to visual generation. arXiv preprint arXiv:2310.05737, 2023. 9" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 667, + 554, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 667, + 554, + 721 + ], + "spans": [ + { + "bbox": [ + 316, + 667, + 554, + 721 + ], + "type": "text", + "content": "[53] Qihang Yu, Mark Weber, Xueqing Deng, Xiaohui Shen, Daniel Cremers, and Liang-Chieh Chen. An image is worth 32 tokens for reconstruction and generation. Advances in Neural Information Processing Systems, 37:128940-128966, 2025. 
3" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 206 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 127 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 127 + ], + "type": "text", + "content": "[54] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. arXiv preprint arXiv:2410.06940, 2024. 2, 3, 4, 5, 6, 7, 8, 9, 13" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 129, + 294, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 129, + 294, + 172 + ], + "spans": [ + { + "bbox": [ + 56, + 129, + 294, + 172 + ], + "type": "text", + "content": "[55] Kaiwen Zha, Lijun Yu, Alireza Fathi, David A Ross, Cordelia Schmid, Dina Katabi, and Xiuye Gu. Language-guided image tokenization for generation. arXiv preprint arXiv:2412.05796, 2024. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 174, + 294, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 174, + 294, + 206 + ], + "spans": [ + { + "bbox": [ + 56, + 174, + 294, + 206 + ], + "type": "text", + "content": "[56] Hongkai Zheng, Weili Nie, Arash Vahdat, and Anima Anandkumar. Fast training of diffusion models with masked transformers. arXiv preprint arXiv:2306.09305, 2023. 
6, 9" + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 86, + 68, + 523, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 68, + 523, + 102 + ], + "spans": [ + { + "bbox": [ + 86, + 68, + 523, + 102 + ], + "type": "text", + "content": "REPA-E: Unlocking VAE for End-to-End Tuning with Latent Diffusion Transformers" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "spans": [ + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "type": "text", + "content": "Supplementary Material" + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 58, + 140, + 293, + 198 + ], + "blocks": [ + { + "bbox": [ + 58, + 140, + 293, + 198 + ], + "lines": [ + { + "bbox": [ + 58, + 140, + 293, + 198 + ], + "spans": [ + { + "bbox": [ + 58, + 140, + 293, + 198 + ], + "type": "table", + "html": "
Training StrategySpatial VarianceTotal Variation
w/o E2E Tuning17.066627.35
E2E w/ REPA Loss18.025516.14
E2E w/ Diff. Loss0.0289.80
", + "image_path": "bbdd03c0a00e3108ede5e16f8df6580adb828343ef7b9f14bbcf5aac90c1174e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 55, + 205, + 295, + 327 + ], + "lines": [ + { + "bbox": [ + 55, + 205, + 295, + 327 + ], + "spans": [ + { + "bbox": [ + 55, + 205, + 295, + 327 + ], + "type": "text", + "content": "Table 10. Impact of Naive End-to-End Training with Diffusion Loss. We report total variation [41] and mean variance along each VAE latent channel for three training settings: 1) Standard LDM training (w/o end-to-end (E2E) tuning), 2) Naive E2E tuning with Diffusion loss, 3) E2E tuning with REPA loss [54]. All experiments use SDVAE for VAE initialization. We observe that using diffusion loss for end-to-end tuning encourages learning a simpler latent space with lower variance along the spatial dimensions (Fig. 3a). The simpler latent space is easier for denoising objective (" + }, + { + "bbox": [ + 55, + 205, + 295, + 327 + ], + "type": "inline_equation", + "content": "\\S 3.1" + }, + { + "bbox": [ + 55, + 205, + 295, + 327 + ], + "type": "text", + "content": "), but degrades final generation performance (Fig. 1). All results are reported at 400K iterations with SiT-XL/2 [30] as LDM." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 347, + 285, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 347, + 285, + 361 + ], + "spans": [ + { + "bbox": [ + 55, + 347, + 285, + 361 + ], + "type": "text", + "content": "A. 
Impact of Diffusion Loss on Latent Space" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 367, + 295, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 367, + 295, + 486 + ], + "spans": [ + { + "bbox": [ + 55, + 367, + 295, + 486 + ], + "type": "text", + "content": "We analyze the effect of naively using diffusion loss for end-to-end tuning, focusing on how it alters the latent space structure. All experiments here use SD-VAE for tokenizer initialization and SiT-XL/2 [30] as the latent diffusion model, trained for 400K iterations without classifier-free guidance. We report two metrics to quantify latent structure, 1) Spatial Variance, computed as the mean per-channel variance across spatial dimensions, and 2) Total Variation [41], which captures local spatial differences in the latent map." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 487, + 295, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 487, + 295, + 571 + ], + "spans": [ + { + "bbox": [ + 55, + 487, + 295, + 571 + ], + "type": "text", + "content": "As shown in Tab. 10 and Fig. 3, directly backpropagating the diffusion loss leads to reduced spatial variance, which creates an easier denoising problem by hacking the latent space but leads to reduced image generation performance. In contrast, end-to-end training with REPA-E not only leads to improved generation performance but also improves the latent space structure for the underlying VAE (Fig. 3, 6)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 582, + 174, + 596 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 174, + 596 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 174, + 596 + ], + "type": "text", + "content": "B. 
Additional Analysis" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 56, + 613, + 295, + 669 + ], + "blocks": [ + { + "bbox": [ + 56, + 613, + 295, + 669 + ], + "lines": [ + { + "bbox": [ + 56, + 613, + 295, + 669 + ], + "spans": [ + { + "bbox": [ + 56, + 613, + 295, + 669 + ], + "type": "table", + "html": "
MethodgFID ↓sFID ↓IS ↑Prec. ↑Rec. ↑
REPA + E2E-Diffusion444.1460.31.490.000.00
REPA + E2E-LSGM9.895.07107.50.720.61
REPA-E (Ours)4.074.60161.80.760.62
", + "image_path": "84102d205259392e556ce48c91dd5529c425e9dd4d94c96963ebdc7eb5e385a1.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 55, + 670, + 295, + 693 + ], + "lines": [ + { + "bbox": [ + 55, + 670, + 295, + 693 + ], + "spans": [ + { + "bbox": [ + 55, + 670, + 295, + 693 + ], + "type": "text", + "content": "Table 11. Comparison with LSGM Objective. REPA-E shows better generation performance and convergence speed." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 698, + 296, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 698, + 296, + 723 + ], + "spans": [ + { + "bbox": [ + 55, + 698, + 296, + 723 + ], + "type": "text", + "content": "Comparison of End-to-End Training Objectives. We provide additional results comparing different objectives for" + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 315, + 140, + 555, + 186 + ], + "blocks": [ + { + "bbox": [ + 315, + 140, + 555, + 186 + ], + "lines": [ + { + "bbox": [ + 315, + 140, + 555, + 186 + ], + "spans": [ + { + "bbox": [ + 315, + 140, + 555, + 186 + ], + "type": "table", + "html": "
MethodgFID↓sFID↓IS↑Prec.↑Rec.↑
REPA + SiT-L22.25.6858.30.740.60
REPA-E + SiT-L12.84.6090.60.790.61
", + "image_path": "fb38cb45bdc201bce864203f0564b39c6f26db98d3284c2cba327117327406b1.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 313, + 188, + 555, + 243 + ], + "lines": [ + { + "bbox": [ + 313, + 188, + 555, + 243 + ], + "spans": [ + { + "bbox": [ + 313, + 188, + 555, + 243 + ], + "type": "text", + "content": "Table 12. Scaling REPA-E to Higher Resolution. System-level results on ImageNet-512 with " + }, + { + "bbox": [ + 313, + 188, + 555, + 243 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 313, + 188, + 555, + 243 + ], + "type": "text", + "content": " latents using SiT-L at 100K steps without classifier-free guidance. We observe that REPA-E leads to significant performance improvements over vanilla-REPA [54] even at high resolutions." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 315, + 251, + 553, + 293 + ], + "blocks": [ + { + "bbox": [ + 315, + 251, + 553, + 293 + ], + "lines": [ + { + "bbox": [ + 315, + 251, + 553, + 293 + ], + "spans": [ + { + "bbox": [ + 315, + 251, + 553, + 293 + ], + "type": "table", + "html": "
SamplerODE, NFE=50SDE, NFE=250
gFIDVA-VAE 5.43E2E-VAE 5.02VA-VAE 5.57E2E-VAE 4.97
", + "image_path": "9c9161a3c487fa16b6c142d4839b37e74004fe3be79b441c86019cad3af5488a.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 313, + 294, + 554, + 338 + ], + "lines": [ + { + "bbox": [ + 313, + 294, + 554, + 338 + ], + "spans": [ + { + "bbox": [ + 313, + 294, + 554, + 338 + ], + "type": "text", + "content": "Table 13. Generalization to T2I Tasks. FID results on MSCOCO text-to-image generation using MMDiT + REPA. We find that end-to-end tuned VAEs (E2E-VAE) also generalizes to T2I tasks showing improved generation performance." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 357, + 554, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 357, + 554, + 429 + ], + "spans": [ + { + "bbox": [ + 313, + 357, + 554, + 429 + ], + "type": "text", + "content": "end-to-end training of VAE and LDM. Specifically, we evaluate: 1) naive E2E training by backpropagating diffusion loss to VAE encoder, 2) the LSGM entropy-regularized objective [47], 3) our proposed REPA-E. All methods are trained with SiT-XL for 400K steps under consistent settings." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 430, + 554, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 430, + 554, + 491 + ], + "spans": [ + { + "bbox": [ + 313, + 430, + 554, + 491 + ], + "type": "text", + "content": "The LSGM objective prevents feature collapse by maximizing entropy of the latent space. However, as shown in Tab. 11, our REPA-E formulation yields better performance across all metrics at just " + }, + { + "bbox": [ + 313, + 430, + 554, + 491 + ], + "type": "inline_equation", + "content": "400\\mathrm{K}" + }, + { + "bbox": [ + 313, + 430, + 554, + 491 + ], + "type": "text", + "content": " steps, with significantly faster convergence and stronger generation quality." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 492, + 554, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 492, + 554, + 589 + ], + "spans": [ + { + "bbox": [ + 313, + 492, + 554, + 589 + ], + "type": "text", + "content": "Scaling REPA-E to Higher Latent Resolution. We conduct experiments on ImageNet-512 [6] to evaluate the performance of REPA-E under higher-resolution latent settings " + }, + { + "bbox": [ + 313, + 492, + 554, + 589 + ], + "type": "inline_equation", + "content": "(64 \\times 64)" + }, + { + "bbox": [ + 313, + 492, + 554, + 589 + ], + "type": "text", + "content": ". We use SD-VAE [40] as the tokenizer and SiT-L as the diffusion model, trained for 100K steps and we report the performance without classifier-free guidance. As shown in Tab. 12, our approach yields significant improvements in generation quality compared to REPA." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 590, + 555, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 590, + 555, + 723 + ], + "spans": [ + { + "bbox": [ + 313, + 590, + 555, + 723 + ], + "type": "text", + "content": "MSCOCO Text-to-Image Generation with E2E-VAE. To further evaluate the utility of the tuned VAE beyond ImageNet, we assess its performance in a text-to-image generation (T2I) setting on MSCOCO [28]. Following REPA [54], we adopt MMDiT [10] as the diffusion backbone and apply REPA loss across all variants. All models are trained for 100K steps and evaluated using classifier-free guidance with " + }, + { + "bbox": [ + 313, + 590, + 555, + 723 + ], + "type": "inline_equation", + "content": "\\alpha_{\\mathrm{cfg}} = 2.0" + }, + { + "bbox": [ + 313, + 590, + 555, + 723 + ], + "type": "text", + "content": " and EMA weights during inference. We report generation FID, and observe that replacing VA-VAE with our E2E-VAE consistently improves downstream text-to-image generation quality (Tab. 13)." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 70, + 294, + 169 + ], + "blocks": [ + { + "bbox": [ + 56, + 70, + 294, + 169 + ], + "lines": [ + { + "bbox": [ + 56, + 70, + 294, + 169 + ], + "spans": [ + { + "bbox": [ + 56, + 70, + 294, + 169 + ], + "type": "table", + "html": "
AutoencoderPSNR↑SSIM↑LPIPS↓rFID↓
SD-VAE [40]25.670.720.130.74
+REPA-E (Ours)24.840.710.150.53
IN-VAE (f16d32)27.400.800.090.26
+REPA-E (Ours)26.870.780.110.27
VA-VAE [50]26.320.760.110.28
+REPA-E (Ours)26.250.750.110.28
", + "image_path": "f4c6791c5721f3d932ba8c2d44bb386e00abe01bc7069b0e7e53337554bc6967.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 56, + 172, + 295, + 216 + ], + "lines": [ + { + "bbox": [ + 56, + 172, + 295, + 216 + ], + "spans": [ + { + "bbox": [ + 56, + 172, + 295, + 216 + ], + "type": "text", + "content": "Table 14. VAE Reconstruction Evaluation on ImageNet-256. While REPA-E primarily improves the generative capability of the VAE (see Tab. 9), it also maintains competitive reconstruction quality across all metrics." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 0 + }, + { + "bbox": [ + 58, + 236, + 217, + 249 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 236, + 217, + 249 + ], + "spans": [ + { + "bbox": [ + 58, + 236, + 217, + 249 + ], + "type": "text", + "content": "C. Remarks on FID Evaluation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 256, + 294, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 256, + 294, + 399 + ], + "spans": [ + { + "bbox": [ + 57, + 256, + 294, + 399 + ], + "type": "text", + "content": "Throughout the paper, we follow the standard ImageNet conditional evaluation protocol, where 50,000 images are generated by randomly sampling class labels. Recent papers [27, 39, 49] have adopted class-balanced generation for evaluation, where 50 images per class are generated across the 1,000 categories. To our surprise, we found that using class-balanced sampling yields slightly better FID performance. Therefore, for the results in Tab. 9, we adopt this class-balanced sampling strategy. Accordingly, all representation alignment methods at the 800-epoch checkpoint in this table are computed under the balanced sampling protocol to ensure a fair and consistent comparison." 
+ } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10685/2b7c0cf2-f712-45f3-86c5-afe1fcf3d48b_content_list.json b/data/2025/2504_10xxx/2504.10685/2b7c0cf2-f712-45f3-86c5-afe1fcf3d48b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..31e511211872e0b8e1e9b02e497fa9667cf7b777 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/2b7c0cf2-f712-45f3-86c5-afe1fcf3d48b_content_list.json @@ -0,0 +1,4722 @@ +[ + { + "type": "text", + "text": "NTIRE 2025 Challenge on Cross-Domain Few-Shot Object Detection: Methods and Results", + "text_level": 1, + "bbox": [ + 148, + 128, + 848, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yuqian Fu\\* Xingyu Qiu\\* Bin Ren\\* Yanwei Fu\\* Radu Timofte\\* Nicu Sebe\\* Ming-Hsuan Yang\\* Luc Van Gool\\* Kaijin Zhang Qingpeng Nong Xiugang Dong Hong Gao Xiangsheng Zhou Jiancheng Pan Yanxing Liu Xiao He Jiahao Li Yuze Sun Xiaomeng Huang Zhenyu Zhang Ran Ma Yuhan Liu Zijian Zhuang Shuai Yi Yixiong Zou Lingyi Hong Mingxi Chen Runze Li Xingdong Sheng Wenqiang Zhang Weisen Chen Yongxin Yan Xinguo Chen Yuanjie Shao Zhengrong Zuo Nong Sang Hao Wu Haoran Sun Shuming Hu Yan Zhang Zhiguang Shi Yu Zhang Chao Chen Tao Wang Da Feng Linhai Zhuo Ziming Lin Yali Huang Jie Me Yiming Yang Mi Guo Mingyuan Jiu Mingliang Xu Maomao Xiong Qunshu Zhang Xinyu Cao Yuqing Yang Dianmo Sheng Xuanpu Zhao Zhiyu Li Xuyang Ding Wenqian Li", + "bbox": [ + 148, + 204, + 866, + 401 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + 
"text_level": 1, + "bbox": [ + 248, + 431, + 326, + 448 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Cross-Domain Few-Shot Object Detection (CD-FSOD) poses significant challenges to existing object detection and few-shot detection models when applied across domains. In conjunction with NTIRE 2025, we organized the 1st CD-FSOD Challenge, aiming to advance the performance of current object detectors on entirely novel target domains with only limited labeled data. The challenge attracted 152 registered participants, received submissions from 42 teams, and concluded with 13 teams making valid final submissions. Participants approached the task from diverse perspectives, proposing novel models that achieved new state-of-the-art (SOTA) results under both open-source and closed-source settings. In this report, we present an overview of the 1st NTIRE 2025 CD-FSOD Challenge, highlighting the proposed solutions and summarizing the results submitted by the participants.", + "bbox": [ + 88, + 464, + 485, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 89, + 733, + 220, + 750 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Few-shot object detection (FSOD) [28] aims at allowing models to detect novel objects using minimal labeled examples. While significant progress has been made, existing FSOD methods [53, 63, 64, 68, 75, 85] typically as", + "bbox": [ + 89, + 758, + 482, + 820 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "sume that the training (source) and testing (target) data are drawn from the same domain. However, this assumption rarely holds in real-world applications. For instance, a model trained on natural images such as those in MS-COCO [41] may face substantial challenges when applied to a novel domain like remote sensing imagery. 
This cross-domain few-shot learning (CD-FSL) problem has attracted considerable attention in the context of classification [12-14, 18, 36, 55, 56, 71, 72, 83, 84, 86, 87], whereas its extension to object detection—i.e., cross-domain few-shot object detection (CD-FSOD)—remains much less explored.", + "bbox": [ + 511, + 434, + 906, + 601 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Upon gaping at this gap, one recent work, CD-ViTO [15], reveals that the different object detection datasets exhibit various characters in style, inter-class variance (ICV), and indefinable boundaries (IB). To further investigate how these factors affect the CD-FSOD, CD-ViTO thus proposes a new benchmark which takes MS-COCO as the source domain and six distinct datasets with diverse style, ICV, IB as unseen targets. Results indicate that the prior detectors all fail to generalize to those targets when the domain gap issue is observed.", + "bbox": [ + 511, + 606, + 908, + 758 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To further promote the advances on CD-FSOD, we newly introduce three more unseen targets, DeepFruits [60], Carpk [20], and CarDD [76] as testbeds for the CD-FSOD detectors. Following the observations in CD-ViTO, these three targets have domains different from the source data, with varying styles, ICV, and IB. Furthermore, to maximally boost the performance of models, we define the task setting proposed in CD-ViTO as closed-source CD-FSOD, while further introducing the new open-source CD-FSOD", + "bbox": [ + 511, + 763, + 908, + 902 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10685v1 [cs.CV] 14 Apr 2025", + "bbox": [ + 22, + 262, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Yuqian Fu, Xingyu Qiu, Bin Ren, Yanwei Fu, Radu Timofte, Nicu Sebe, Ming-Hsuan Yang, and Luc Van Gool are the NTIRE2025 challenge organizers. The other authors are participants in this challenge. 
Appendix A contains the authors' team names and affiliations. NTIRE2025 webpage: https://cvlai.net/ntire/2025/. Challenge Codes: https://github.com/lovelyqian/NTIRE2025_CDFSOD.", + "bbox": [ + 89, + 825, + 482, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "setting. To be specific, the closed-source setting means the source data for model training is strictly limited, e.g., MS-COCO as in CD-ViTO; while the open-source setting relaxes this limitation and allows the participants to leverage diverse knowledge sources and foundation models to explore the upper bound on the target domains.", + "bbox": [ + 89, + 90, + 480, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In collaboration with the 2025 New Trends in Image Restoration and Enhancement (NTIRE 2025) Workshop, which is particularly interested in the model robustness under changing conditions, we present the 1st CD-FSOD Challenge. It features an open-source CD-FSOD as the main track and a closed-source CD-FSOD as a special track. For the closed-source track, MS-COCO serves as the sole source domain. The validation phase includes six target domains proposed in CD-ViTO. Three additional novel domains are used as the final test sets for both tracks. Mean Average Precision (mAP) is employed as the ranking metric. 
We believe this challenge will drive progress in the CD-FSOD field and foster meaningful algorithmic innovations.", + "bbox": [ + 89, + 181, + 480, + 378 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This challenge is one of the NTIRE $2025^{1}$ Workshop associated challenges on: ambient lighting normalization [74], reflection removal in the wild [81], shadow removal [73], event-based image deblurring [69], image denoising [70], XGC quality assessment [44], UGC video enhancement [61], night photography rendering [9], image super-resolution (x4) [3], real-world face restoration [4], efficient super-resolution [57], HR depth estimation [82], efficient burst HDR and restoration [32], cross-domain few-shot object detection [16], short-form UGC video quality assessment and enhancement [38, 39], text to image generation model quality assessment [19], day and night raindrop removal for dual-focused images [37], video quality assessment for video conferencing [24], low light image enhancement [45], light field super-resolution [77], restore any image model (RAIM) in the wild [40], raw restoration and super-resolution [5], and raw reconstruction from RGB on smartphones [6].", + "bbox": [ + 89, + 378, + 482, + 650 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. NTIRE 2025 CD-FSOD Challenge", + "text_level": 1, + "bbox": [ + 89, + 662, + 401, + 680 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Challenge Overview", + "text_level": 1, + "bbox": [ + 89, + 688, + 282, + 704 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our challenge aims to advance Cross-Domain Few-Shot Object Detection (CD-FSOD) — detecting objects under domain shifts with limited labeled data. We use six previously published target domains [15] as validation sets and introduce three newly constructed datasets for final testing. 
Beyond the dataset update, we introduce open-source CD-FSOD as a new setting, allowing participants to freely choose source datasets and pre-trained models to enhance generalization. Fig. 1 illustrates both the predefined closed-source CD-FSOD and the new open-source CD-FSOD settings, along with the newly introduced target domains.", + "bbox": [ + 89, + 710, + 482, + 876 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Task Formulations", + "text_level": 1, + "bbox": [ + 513, + 90, + 694, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Closed-Source CD-FSOD. Given a source dataset $\\mathcal{D}_S$ and a novel target dataset $\\mathcal{D}_T$ , the closed-source CD-FSOD track assumes that the source class set $\\mathcal{C}_S$ and the target class set $\\mathcal{C}_T$ are completely disjoint, i.e., $\\mathcal{C}_S \\cap \\mathcal{C}_T = \\emptyset$ . Additionally, the distributions of the source domain $\\mathcal{D}_S$ and the target domain $\\mathcal{D}_T$ are not identical. Participants are required to train models on $\\mathcal{D}_S$ and test them on $\\mathcal{D}_T$ , where each class in $\\mathcal{C}_T$ has only a few labeled examples. Usually, $\\mathcal{D}_S$ is a single dataset, as in CD-ViTO [15]. We refer to this setting as closed-source CD-FSOD to differentiate it from the open-source variant.", + "bbox": [ + 511, + 112, + 903, + 277 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Open-Source CD-FSOD. In contrast to the closed-source setting where training data is strictly limited, the open-source CD-FSOD track is designed to leverage the capabilities of foundation models. Since these models are pretrained on large-scale and diverse datasets, it is practically hard to trace all the knowledge embedded within them. Hence, we refer to this setting as open-source. 
While the relaxed constraints on source data make it difficult to strictly ensure non-overlapping classes between the source and target data, the track still focuses on addressing the core challenges of domain shift and few-shot object detection. We believe this setting will significantly accelerate the development of CD-FSOD methods for real-world applications.", + "bbox": [ + 511, + 279, + 903, + 474 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this challenge, the open-source CD-FSOD is designated as the main track, with awards presented to the top three teams. The closed-source CD-FSOD serves as the special track, with a single award granted to the top-performing team.", + "bbox": [ + 511, + 476, + 903, + 550 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "$N$ -way $K$ -shot Protocol. We adopt the $N$ -way $K$ -shot evaluation protocol. For each novel class in the target class set $\\mathcal{C}_T$ , $K$ labeled instances are provided, forming the support set $S$ . The remaining unlabeled instances constitute the query set $Q$ . Instances contained in the support set $S$ are used to assist the model in recognizing and detecting the objects in $Q$ .", + "bbox": [ + 511, + 551, + 903, + 656 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.3. Challenge Phases and Datasets", + "text_level": 1, + "bbox": [ + 513, + 666, + 785, + 681 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This challenge involves one development stage and one testing stage. The source data $\\mathcal{D}_S$ for both stages is the same, i.e., MS-COCO [41] for the closed-source track and unlimited data for the open-source track. 
While the testing data $\\mathcal{D}_T$ is different.", + "bbox": [ + 511, + 688, + 903, + 763 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Development Stage: Datasets proposed in the CD-ViTO, including ArTaxOr [8], Clipart1K [23], DIOR [34], Deep-Fish [62], NEU-DET [67], and UODD [26] are taken as targets $\\mathcal{D}_T$ during development stage.", + "bbox": [ + 511, + 763, + 903, + 825 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Testing Stage. Three previously unseen datasets (DeepFruits [60], Carpk [20], and CarDD [76]) are introduced and used as the targets $\\mathcal{D}_T$ for the final testing phase. Note that the ground truth annotations for these query sets are held exclusively by the challenge organizers.", + "bbox": [ + 511, + 825, + 903, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "1https://www.cvlai.net/ntire/2025/", + "bbox": [ + 107, + 886, + 377, + 898 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/1bc0d3ab7ab85fb2208d5f61d937c337f5fbc4fba1fc2687c691f111074cfeb2.jpg", + "image_caption": [ + "Figure 1. Illustration of the challenge settings, including the closed-source and open-source CD-FSOD tracks. The three newly introduced target datasets used in the final testing phase are also shown." + ], + "image_footnote": [], + "bbox": [ + 91, + 94, + 903, + 270 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.4. CD-ViTO Baseline Model", + "text_level": 1, + "bbox": [ + 89, + 339, + 326, + 356 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We take CD-ViTO, the current State-of-the-art (SOTA) method under the closed-source setting, as the baseline for this challenge. Briefly, CD-ViTO is built upon DE-ViT [85], an open-set detector, and fine-tuned using the support set. As in Fig. 
2, modules in blue are inherited from DE-ViT, while modules in orange are newly proposed. New improvements include learnable instance features, instance reweighting, domain prompter, and finetuning.", + "bbox": [ + 89, + 362, + 483, + 484 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/db3a8fdfeee41c36fdb097170cdd5cbd99260e8264a29ec9a9b48b94f98c62f1.jpg", + "image_caption": [ + "Figure 2. Overall framework of CD-ViTO baseline method." + ], + "image_footnote": [], + "bbox": [ + 94, + 496, + 483, + 631 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Intuitively, the learnable instance feature module is designed to enhance inter-class variance (ICV) among different target classes by making the initially fixed instance features learnable and optimizing them through supervised few-shot detection tasks on the target support set. The instance reweighting module further improves prototype quality by assigning higher weights to high-quality object instances—e.g., those with minimal indefinable boundary (IB). These weights are learned via a lightweight MLP and fully connected layer, as illustrated in the upper part of Fig. 2(b). The domain prompter module introduces learnable domain perturbations to simulate varying domain styles. These perturbations are applied to object prototypes, followed by a prototype consistency loss to ensure that the introduced perturbations do not affect the seman-", + "bbox": [ + 89, + 674, + 483, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "tic category of the prototypes. Simultaneously, a domain diversity loss encourages the generated domains to be sufficiently diverse. The lower part of Fig. 2(b) illustrates this mechanism. By injecting virtual domains and enforcing robustness against the induced perturbations, this strategy enhances the model's generalization under domain shifts. Finetuning is applied to the modules highlighted with fire icons in Fig. 
2.", + "bbox": [ + 511, + 340, + 906, + 464 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.5. Evaluation Protocol", + "text_level": 1, + "bbox": [ + 511, + 483, + 702, + 500 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The final score is measured based on the model's performance on the three datasets of the testing stage. For each dataset, we validate the models on three different few-shot settings: 1-shot, 5-shot, and 10-shot. This results in a total of nine mean Average Precision (mAP) scores: D1_1shot, D1_5shot, D1_10shot; D2_1shot, D2_5shot, D2_10shot; and D3_1shot, D3_5shot, D3_10shot. The D1, D2, D3 denote the Deep-Fruits, Carpk, and CarDD, respectively.", + "bbox": [ + 511, + 510, + 906, + 647 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The final ranking score is computed as a weighted average avg() of these scores:", + "bbox": [ + 511, + 650, + 906, + 681 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\text {S c o r e} = 2 * \\text {a v g} (\\mathrm {D} 1 _ {-} 1 \\text {s h o t}, \\mathrm {D} 2 _ {-} 1 \\text {s h o t}, \\mathrm {D} 3 _ {-} 1 \\text {s h o t}) \\\\ + 1 * a v g (D 1 \\_ 5 s h o t, D 2 \\_ 5 s h o t, D 3 \\_ 5 s h o t) \\\\ + 1 * a v g (D 1. 1 0 s h o t, D 2. 1 0 s h o t, D 3. 1 0 s h o t) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 511, + 700, + 916, + 753 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Rationale for Weighted Scoring. 
We assign a higher weight $(\\times 2)$ to the 1-shot setting for two primary reasons: (1) Performance in the 1-shot scenario is generally lower than in the 5-shot and 10-shot settings due to the limited availability of labeled examples for adaptation; and (2) emphasizing 1-shot performance encourages the development of models that are more robust and effective in extremely low-data conditions.", + "bbox": [ + 511, + 779, + 906, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/e6bd5a1a7ac2872e0e9ebd77446c8317fcdc5f074f12be87eed64ee98bdfa6ce.jpg", + "table_caption": [ + "Table 1. Open-source and closed-source results on CD-FSOD. D1, D2, and D3 represent DeepFruits, CARPK, and CarDD, respectively. Mean Average Precision (mAP) on 1-shot, 5-shot, and 10-shot are reported. Teams achieving top results are highlighted." + ], + "table_footnote": [], + "table_body": "
Main Open-Source Track
RankTeam NameScoreD1_1shotD1_5shotD1_10shotD2_1shotD2_5shotD2_10shotD3_1shotD3_5shotD3_10shot
1MoveFree231.0166.1864.5862.5760.4358.8959.0048.7549.2848.00
2AI4EarthLab215.9261.1965.4165.3559.1558.0559.0034.2143.8547.00
3IDCFS215.4863.3465.4164.7561.1460.4260.0032.3339.2443.00
4FDUROILab_Lenovo211.5561.2562.8964.6659.2459.2459.0035.1337.6340.00
5HUSTLab210.7863.7161.3257.1960.4260.4760.0031.0140.0943.00
6TongjiLab172.1442.3641.9041.7455.9555.9555.0031.4031.4031.00
7Manifold159.8632.0544.2844.2757.0657.0657.0018.7129.3432.00
8MXT108.2022.2640.5741.3421.1226.3430.2323.8128.0029.00
Special Closed-Source Track
RankTeam NameScoreD1_1shotD1_5shotD1_10shotD2_1shotD2_5shotD2_10shotD3_1shotD3_5shotD3_10shot
1X-Few125.9036.5846.9550.9823.0129.6828.0020.1129.6833.00
2MM117.3932.4745.2350.2318.8329.3628.0018.3129.1431.00
3FSV112.8131.2343.8949.3213.6926.0426.5919.7130.1633.17
4IPC105.6232.5847.1245.6413.4120.7713.0018.1829.9832.00
5LJY105.2833.5246.0445.3410.6811.4525.0018.3430.9432.00
/CD-ViTO Base [15]91.0027.9537.4243.586.7721.2824.0010.0726.4730.00
", + "bbox": [ + 94, + 127, + 906, + 343 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. Challenge Results", + "text_level": 1, + "bbox": [ + 89, + 364, + 266, + 383 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Among the 152 registered participants, 8 and 5 teams have participated the final testing stage and submitted their results, codes, and factsheets. Table. 1 summarizes the results of these methods. Detailed descriptions of the participants' solutions are provided in Sec.4 and Sec.5, each corresponding to a different track.", + "bbox": [ + 89, + 392, + 482, + 482 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Open-Source Track Results. In the open-source track, nearly all participating teams achieved strong performance with clear improvements over the provided CD-ViTO baseline. This highlights not only the effectiveness of their proposed methods but also the significance of introducing this new task setting. As observed, relaxing the strict limitation on the source data offers a substantial advantage in tackling the CD-FSOD task.", + "bbox": [ + 89, + 486, + 482, + 604 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, the teams MoveFree, AI4EarthLab, and IDCFS emerged as the top performers in this track, achieving scores of 231.01, 215.92, and 215.48, respectively—significantly surpassing the baseline and other teams under the same track.", + "bbox": [ + 89, + 609, + 482, + 684 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Closed-Source Track Results. The performance achieved by the closed-source track teams is generally lower than that of the open-source track. This is quite understandable considering that the closed-source track enforces stricter constraints. 
Nevertheless, the participants managed to improve the baseline method clearly.", + "bbox": [ + 89, + 686, + 482, + 777 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In particular, the X-Few team stands out with a final score of 125.90, significantly outperforming other competitors. This shows that well-designed architectures and training strategies can still bring notable gains even without relying on large external models. Other teams in this track also delivered solid improvements. Their contributions are valuable in terms of enabling fair comparisons and emphasizing algorithmic annotations.", + "bbox": [ + 89, + 780, + 482, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Main Open-Source Track Methods", + "text_level": 1, + "bbox": [ + 511, + 364, + 830, + 382 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. MoveFree", + "text_level": 1, + "bbox": [ + 511, + 390, + 627, + 404 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1.1. Proposed Method", + "text_level": 1, + "bbox": [ + 511, + 414, + 683, + 429 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Open-set object detectors, such as [35], [43], and [58], are designed to detect objects based on arbitrary text descriptions. These models are typically pre-trained on large-scale, well-annotated datasets, ensuring strong alignment between textual and visual modalities. As a result, they exhibit remarkable zero-shot capabilities, allowing them to recognize and localize unseen object categories based solely on textual prompts. 
Given the strong generalization ability of such open-set detectors, this team believes that they are inherently well-suited for cross-domain few-shot object detection, as their robust pre-trained representations can be effectively adapted to new domains with minimal supervision.", + "bbox": [ + 511, + 433, + 906, + 616 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Thus, the MoveFree team focuses on leveraging and enhancing pre-trained open-set object detectors for CD-FSOD during the fine-tuning stage. The proposed approach introduces three key improvements: (1) To address the issue of missing annotations, self-training is introduced to iteratively refine the training data, thereby enhancing fine-tuning performance. (2) A Mixture-of-Experts (MoE) architecture is integrated into the open-set object detector to improve adaptability and robustness in the few-shot setting. (3) A two-stage fine-tuning pipeline is designed carefully. Code is made available2.", + "bbox": [ + 511, + 616, + 908, + 781 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Self-training Paradigm. According to the definition of few-shot object detection in CD-ViTO[15], $K$ -shot object detection refers to having $K$ labeled instances in the training data, rather than $K$ fully annotated images. This implies that instances of target categories may lack annotations in the provided training set.", + "bbox": [ + 511, + 782, + 908, + 875 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "2https://github.com/KAIJINZ228/Few_Shot_GD", + "bbox": [ + 529, + 886, + 862, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Upon careful investigation, this team identified that the issue of incomplete annotations is prominent across all three test datasets in this challenge. 
Drawing on their expertise in developing open-set object detectors, the team recognized that missing annotations for target categories can significantly degrade model performance. This degradation occurs because the loss function penalizes the model for correctly detecting unannotated objects, mistakenly treating them as false positives due to their absence in the ground truth labels. Therefore, this team employs a self-training strategy during the fine-tuning stage of Grounding DINO to iteratively refine the annotations in the training data. Specifically, Grounding DINO periodically generates predictions on the training set, which are then incorporated as additional annotations. This iterative process gradually improves the quality of the training data, ultimately leading to enhanced model performance.", + "bbox": [ + 89, + 90, + 483, + 347 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The substitution of the Mixture-of-Experts (MoE). In few-shot object detection, the availability of training data is highly limited. Therefore, maximizing the object detector's ability to extract supervision from this scarce data is crucial during the fine-tuning stage. In this challenge, beyond the few-shot constraint, the cross-domain setting further increases the difficulty, as detectors usually require additional supervision to effectively adapt to a new domain.", + "bbox": [ + 89, + 347, + 483, + 468 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The core concept of the MoE architecture is to enable different components (i.e., experts) of a model to specialize in different aspects of the data [2]. In recent years, MoE has gained popularity in multi-modal models, including Mistral [25] and DeepSeek-V2 [42]. 
A common application of MoE in such models is replacing the traditional feedforward network (FFN) with an MoE-based variant, as seen in Switch Transformer [10] and OpenMoe [80].", + "bbox": [ + 89, + 468, + 483, + 589 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To maximize supervision and enable the model to learn effectively from the limited training data, this team integrates a Mixture-of-Experts (MoE) mechanism into Grounding DINO during the fine-tuning stage. The MoE framework allows different experts to specialize in distinct aspects of the data, facilitating the capture of more diverse and informative representations. It is hypothesized that this capability helps Grounding DINO better adapt to the target domain while making more efficient use of the available training data.", + "bbox": [ + 89, + 589, + 483, + 739 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this team's approach, the MoE mechanism is incorporated into the feed-forward network (FFN) layers of Grounding DINO's Cross-Modality Decoder. As illustrated in Figure 3, the MoE architecture consists of one shared expert and three router-selected experts.", + "bbox": [ + 89, + 739, + 483, + 816 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1.2. Training Details", + "text_level": 1, + "bbox": [ + 89, + 821, + 250, + 835 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A two-stage fine-tuning pipeline is adopted to adapt Grounding DINO for cross-domain few-shot object detection. 
In the first stage, the standard Grounding DINO (without the MoE substitution) is fine-tuned on the training data,", + "bbox": [ + 89, + 840, + 483, + 900 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/0b7da84e2775e9ddbfcbf86d3acb6b3e314368a8cf2dd3dcc8102d0f04f792a6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 576, + 127, + 888, + 325 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e99f2e71e525eaa1944ac9b617b933a5aa42cf899e9ed92b704bb56a99e68e67.jpg", + "image_caption": [ + "Figure 3. Team MoveFree: an illustration of the substitution of MoE into Grounding DINO's decoder layers." + ], + "image_footnote": [], + "bbox": [ + 576, + 332, + 880, + 500 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "with all parameters trainable except for the language encoder. In the second stage, the MoE architecture is introduced into the model.", + "bbox": [ + 511, + 582, + 906, + 627 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For the second stage, the model is initialized using the weights obtained from the first stage, excluding the MoE components. The shared expert within the MoE is initialized with weights from the first stage, while the three router-selected experts are initialized using the open-source pre-trained weights of Grounding DINO. This initialization strategy facilitates effective learning from limited training data while retaining knowledge acquired during the initial stage. During this phase, only the MoE components and the detection head remain trainable, with all other parts of the model kept frozen.", + "bbox": [ + 511, + 628, + 908, + 794 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Additionally, the self-supervised learning paradigm is applied in both stages to iteratively refine the training data and enhance performance. The training strictly adheres to the provided few-shot training set, without utilizing any external data. 
The overall approach is computationally efficient and can be executed on a single V100 GPU within a reasonable time frame.", + "bbox": [ + 511, + 795, + 908, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2.AI4EarthLab", + "text_level": 1, + "bbox": [ + 89, + 90, + 233, + 104 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2.1. Proposed Method", + "text_level": 1, + "bbox": [ + 89, + 112, + 261, + 127 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Foundation models pretrained on large-scale datasets, such as GroundingDINO [43] and LAE-DINO [51], have demonstrated strong detection performance in cross-domain zero-shot and few-shot object detection tasks. Thus, the AI4EarthLab team is motivated to explore such foundation models for CD-FSOD.", + "bbox": [ + 89, + 131, + 482, + 220 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown in Fig. 4, this team proposes an augmentation-search strategy for CD-FSOD, which leverages open-source data and transfers the model to novel target domains. Following the approaches in [15, 52], an efficient fine-tuning method is adopted to explore the cross-domain few-shot detection capabilities of foundation models, requiring only lightweight tuning to identify effective subfields. Code is made available3.", + "bbox": [ + 89, + 223, + 483, + 342 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/43d8c11f2a43152c39a45bf877db09a41ebc256d1d670b340e4ee5df4386d35a.jpg", + "image_caption": [ + "Figure 4. Team AI4EarthLab: overall framework of augmentation-search strategy Enhance Then Search (ETS) with foundation model for CD-FSOD." 
+ ], + "image_footnote": [], + "bbox": [ + 91, + 354, + 480, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Data augmentation has proven effective in reducing semantic confusion during few-shot fine-tuning, particularly in cases where categories—such as certain fruits—are visually and semantically similar. Through extensive few-shot experiments, it is observed that integrating image-based augmentation with optimal domain search strategies can further enhance the performance of foundation models, though their upper performance bound remains uncertain. Building upon the open-source Grounding DINO framework, several commonly used image augmentation techniques are incorporated, and specific optimization objectives are defined to efficiently search for optimal subdomains within a broad domain space. This strategy facilitates more effective few-shot object detection. The proposed augmentation-search strategy consists of the following steps:", + "bbox": [ + 88, + 545, + 482, + 787 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Step 1: Select the foundation model. This team adopts the Swin-B version of GroundingDINO as the foundation model, because of its best performance within the open-source model. This model has been pre-trained on a diverse set of large-scale datasets, including COCO, Objects365 (O365), GoldG, Cap4M, OpenImages, ODinW-35,", + "bbox": [ + 89, + 786, + 482, + 878 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "and RefCOCO, which collectively provide strong generalization capabilities across multiple vision-language grounding tasks.", + "bbox": [ + 511, + 90, + 903, + 135 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Step 2: Build a combined image augmentation pipeline. To improve the model's adaptability to various subdomains under limited data scenarios, this team construct a composite image augmentation pipeline. 
This pipeline randomly applies a combination of augmentation techniques such as CachedMosaic, YOLOXHSVRandomAug, RandomFlip, CachedMixUp, RandomResize, and RandomCrop. These methods are designed to enhance sample diversity, simulate domain shifts, and improve the model's robustness during fine-tuning. Additional data augmentation techniques, such as Copy-Paste, are also evaluated. However, these methods are found to introduce greater instability during few-shot fine-tuning.", + "bbox": [ + 511, + 136, + 906, + 332 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Step 3: Construct an optimized target domain validation set. To evaluate adaptation performance, a subset of the annotated test data is sampled and used as a validation set. Rather than employing full annotations, coarse-grained labeling is applied to provide sufficient supervision for hyperparameter tuning, while significantly reducing annotation costs in the target domain.", + "bbox": [ + 511, + 333, + 905, + 436 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Step 4: Search for the best model parameters on the validation set. Hyperparameter search and model selection are conducted based on validation performance. This process involves tuning the learning rate, augmentation intensity, and other training configurations to determine the optimal setup for effective domain adaptation.", + "bbox": [ + 511, + 438, + 905, + 529 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Step 5: Perform inference on the test set. Once the optimal configuration is identified, the fine-tuned model is applied to the held-out test set to evaluate its final performance on the target domain.", + "bbox": [ + 511, + 529, + 903, + 589 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2.2. 
Training Details", + "text_level": 1, + "bbox": [ + 511, + 595, + 671, + 609 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Experiments are conducted on eight NVIDIA A100 GPUs, executing $8 \\times 50$ experiment groups per round. During training, the optimal step size is selected based on historical performance to accelerate the fine-tuning process. Learning rate schedules are adjusted using milestone epochs, typically set to 1, 5, and 9 depending on the fine-tuning setting. The model uses 900 queries by default and a maximum text token length of 256. A BERT-based text encoder with BPE tokenization is employed. Both the feature enhancer and cross-modality decoder consist of six layers, and deformable attention is adopted in the image cross-attention modules. The loss function comprises classification (or contrastive) loss, box L1 loss, and GIoU loss. Following the Grounding DINO framework, Hungarian matching weights are set to 2.0 (classification), 5.0 (L1), and 2.0 (GIoU), while the final loss weights are 1.0, 5.0, and 2.0, respectively. Although various hyperparameter configurations are also explored, their impact is found to be relatively minor compared to that of data augmentation strategies.", + "bbox": [ + 511, + 613, + 906, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "3https://github.com/jaychempan/ETS", + "bbox": [ + 107, + 886, + 377, + 898 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 504, + 936 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. IDCFS", + "text_level": 1, + "bbox": [ + 89, + 90, + 181, + 104 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3.1. Proposed Method", + "text_level": 1, + "bbox": [ + 89, + 112, + 261, + 127 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The IDCFS team proposes a Pseudo-Label Driven Vision-Language Grounding method for CD-FSOD. 
As shown in Figure 5, the proposed method mainly combines large-scale foundation models with an iterative pseudo-labeling strategy. The GLIP [35] is being fine-tuned using three approaches, with the full model fine-tuned delivering the best results in most cases. To better exploit the support set, an iterative training strategy is proposed and applied, using high-confidence predictions as pseudo-labels to refine the model. Additionally, this team also fine-tunes Grounding DINO [43] with LoRA [21], efficiently modifying the attention layers while freezing the base model. Finally, the model ensemble with confidence-reweighted NMS is further adopted to boost accuracy. Code is made available4.", + "bbox": [ + 89, + 131, + 483, + 344 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/85b77445909842aa7ec248e69a7ffa63394b4c2f44fa7b5de322b7355f4aed5c.jpg", + "image_caption": [ + "Figure 5. Team IDCFS: overview of the proposed Pseudo-Label Driven Vision-Language Grounding for CD-FSOD." + ], + "image_footnote": [], + "bbox": [ + 93, + 358, + 482, + 518 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fine-tuning on GLIP. Foundation models pretrained on large-scale datasets, such as GLIP [35], have demonstrated strong performance in zero-shot and few-shot object detection tasks. The proposed method is based on the GLIP-L model, which has been pretrained on several datasets including FourODs, GoldG, CC3M+12M, and SBU. For downstream tasks, this team tried three ways to fine-tune GLIP: 1) Full Model Fine-Tuning: fine-tune all parameters of the GLIP-L model using a relatively small learning rate $(\\mathrm{lr} = 2\\mathrm{e} - 5)$ . 2) Prompt Tuning V1: fine-tune only the parameters of the text branch. 3) Prompt Tuning V2: This mode performs traditional prompt tuning by applying a linear layer to map the extracted text features. 
Experiments show that Full Model Fine-Tuning generally achieves the best fine-tuning performance in most cases.", + "bbox": [ + 88, + 574, + 482, + 800 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Iterative Training. Given the scarcity, high cost, and limited availability of annotated data in few-shot learning scenarios, this team also designed an iterative training approach to train the model, as shown in Figure 6. Specifically, the proposed method first fine-tunes the model for", + "bbox": [ + 89, + 801, + 483, + 878 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "a few steps using the available labeled data. Then, the fine-tuned model is used to predict the support set samples, selecting the predictions with high confidence as pseudolabels to update the label information of the support set samples. The model is then fine-tuned again. By iterating this process, the proposed method fully utilizes the information in the support set samples, achieving better performance while ensuring the robustness of the model, making it less susceptible to the influence of low-quality labels.", + "bbox": [ + 511, + 90, + 906, + 227 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e1bae98ebc90a5af43497e591c20abbf6d9c63d8ec25b909a6702559d0ae8005.jpg", + "image_caption": [ + "Figure 6. Team IDCFS: overview of the iterative training process." + ], + "image_footnote": [], + "bbox": [ + 514, + 243, + 906, + 382 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fine-tuning Grounding DINO with LoRA. The IDCFS team also uses Grounding DINO [43] as another foundation model to generate bounding boxes and classification probabilities. The LoRA [21] is used to fine-tune GroundingDINO on the few-shot training set. Specifically, this team adds bypass adapters to the linear projection layers (i.e., query, key, and value) of the attention mechanism in the visual backbone and BERT of Grounding DINO. 
To facilitate better adaptation to cross-domain datasets, the original model weights are frozen, and only the newly added parameters are trained.", + "bbox": [ + 511, + 420, + 906, + 585 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Model Ensemble. To effectively combine the outputs of GLIP and Grounding DINO, a model ensemble strategy with confidence reweighting is employed. Specifically, the detection scores from each model are scaled by predefined reliability weights. The reweighted predictions are then merged and refined using Non-Maximum Suppression (NMS) [47] to eliminate redundant bounding boxes and produce the final fused results. This approach allows the more reliable model to have a greater influence on the final predictions, enhancing detection performance by leveraging the complementary strengths of both models.", + "bbox": [ + 511, + 587, + 908, + 753 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3.2. Training Details", + "text_level": 1, + "bbox": [ + 511, + 760, + 671, + 775 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For GLIP fine-tuning, the GLIP-L variant is used, which incorporates Swin-L [46] as the visual encoder and BERT [7] as the text encoder. The model is pre-trained on a variety of datasets, including FourODs [29-31], GoldG [27], CC3M+12M, and SBU [49]. During fine-tuning, full-model training is applied with a reduced learning rate of 2e-5, compared to the original setting of 1e-4 in GLIP. For Grounding DINO, the Swin-B [46] backbone is used as the vi", + "bbox": [ + 511, + 779, + 908, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "4https://github.com/Pumpkinder/GLIP-CDFSOD", + "bbox": [ + 107, + 886, + 441, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "sual encoder and BERT from Hugging Face [78] as the text encoder. 
The model is pre-trained on COCO [41], Objects365 [65], GoldG [27], Cap4M, OpenImages [31], ODinW-35 [33], and RefCOCO [27]. For the 1-shot and 5-shot settings on the CARPK dataset [20], no fine-tuning is performed. For 1-shot training on DeepFruits [60], only the backbone is fine-tuned using LoRA. In all other cases, LoRA is used to fine-tune both the backbone and the BERT text encoder.", + "bbox": [ + 89, + 90, + 483, + 224 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. FDUROILab_Lenovo", + "text_level": 1, + "bbox": [ + 89, + 238, + 290, + 252 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4.1. Proposed Method", + "text_level": 1, + "bbox": [ + 89, + 260, + 261, + 275 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Efficient Tuning. To enhance the model's adaptability in cross-domain few-shot detection (CDFSOD), this team proposes an efficient fine-tuning strategy. The proposed approach leverages data augmentation techniques to expand the training set and improve the model's ability to recognize objects in the target domain with proposed k-shot annotated samples.", + "bbox": [ + 89, + 279, + 482, + 385 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Specifically, given a k-shot setting, where $\\mathbf{k}$ represents the number of provided object samples, the proposed approach adopts a structured fine-tuning pipeline, which is shown in Figure 7.", + "bbox": [ + 89, + 386, + 483, + 446 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9f7c58c6153c5492de9247cf262c01c36f6cfc8b799078979c8b237f362f2ad7.jpg", + "image_caption": [ + "Figure 7. Team FDUROILab_Lenovo: overview of the efficient tuning and inference." + ], + "image_footnote": [], + "bbox": [ + 91, + 459, + 480, + 571 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(1) Object Cropping and Augmentation. 
Using the provided bounding boxes of k-shot examples, the proposed method first crops the target objects from the original images. The cropped objects are then subjected to various data augmentation techniques, including flipping, rotation, grayscale conversion, and other transformations, to introduce diversity and improve generalization. (2) Object Rescaling and Random Pasting. The proposed method randomly rescales the augmented objects to different sizes and pastes these transformed objects to the original images at different locations. This step simulates new object placements and enhances the model's robustness to variations in object appearance and context. (3) Fine-Tuning with Augmented Data. The proposed method finetunes the open-vocabulary detection model with the augmented images. This enables the detector to better adapt to objects in the target domain, even with minimal labeled examples. Additionally, the augmented data effectively increases the number of", + "bbox": [ + 89, + 628, + 483, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "training samples, mitigating the few-shot learning limitation and improving overall detection performance. Through this efficient fine-tuning approach, the finetuned model gains enhanced adaptability to new domains while maintaining the advantages of open-vocabulary detection.", + "bbox": [ + 511, + 90, + 905, + 167 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Inference. Since the proposed approach is based on an open-vocabulary detection model, it requires access to the target category labels during inference, which is shown in Figure 7. To obtain these labels, this team utilizes Qwen2.5-VL [1] to generate the textual descriptions of the target categories. The retrieved target labels from Qwen2.5-VL are used as textual input to guide the detection process. 
Then, the open-vocabulary detection model [11] is used to identify and classify objects in the test image based on the provided text-based labels.", + "bbox": [ + 511, + 169, + 906, + 319 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c281bd39de4691807c301ba17b6d49273f8d7a7767a20c78259bbb45b9f42084.jpg", + "image_caption": [ + "Figure 8. Team FDUROILab_Lenovo: post processing." + ], + "image_footnote": [], + "bbox": [ + 516, + 333, + 906, + 476 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Post-Process. Although existing open-vocabulary detectors possess strong open-set detection capabilities, their performance on the challenge test set remains suboptimal. Upon further analysis, this team found that while the detector can successfully identify most objects, its primary weakness lies in classification errors rather than detection failures. This indicates that the open-vocabulary detection model still struggles with accurate classification when adapting to objects in a new domain. To address this issue, the Qwen2.5-VL is introduced as an auxiliary classifier to refine the final predictions, which is shown in Figure 8. For each test image, this team prompts Qwen2.5-VL to describe the objects present in the scene and provide a list of candidate categories that are likely to appear in the image. After that, this team refines the output of the open-vocabulary detection model using one of two strategies: (1) Filtering. Remove objects that are classified incorrectly by the detector and are not listed by Qwen2.5-VL. (2) Reclassification: Assign all detected objects to one of the categories predicted by Qwen2.5-VL, ensuring consistency between the detected bounding boxes and the high-level scene understanding of the multimodal model. The choice between these two strategies depends on the specific test dataset. 
By leveraging Qwen2.5-VL as a post-processing step, this team effectively corrects classification errors and enhances the", + "bbox": [ + 511, + 523, + 908, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 925, + 503, + 935 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "model's performance on unseen domains, leading to more accurate and reliable object detection results.", + "bbox": [ + 89, + 90, + 485, + 122 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.4.2. Training Details", + "text_level": 1, + "bbox": [ + 89, + 131, + 250, + 145 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "LLMDet [11] is adopted as the open-vocabulary detection model, with Swin-Large [46] serving as the visual backbone. The Qwen2.5-VL-72B [1] is introduced as the multimodal large language model (MLLM). Fine-tuning experiments are conducted on eight NVIDIA RTX 3090 GPUs, using a batch size of 8 and a learning rate of 1e-6. The number of training iterations varies across datasets and few-shot settings. For DeepFruits [60] and CarDD [76], the model is fine-tuned for 30, 50, and 100 batches under the 1-shot, 5-shot, and 10-shot settings. No fine-tuning is performed for CARPK [20].", + "bbox": [ + 89, + 150, + 483, + 316 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To enhance classification accuracy, dataset-specific post-processing strategies are applied. For DeepFruits, all detected objects are reclassified into one of the categories predicted by Qwen2.5-VL. In the case of CarDD, detected objects not belonging to the predefined categories are filtered out. As CARPK contains only a single object category, no additional classification is performed. However, further filtering is applied to remove overly large bounding boxes, which are likely to be incorrect, as the objects in this dataset are generally small. 
In all cases, Non-Maximum Suppression (NMS) is used to eliminate redundant or overlapping predictions.", + "bbox": [ + 89, + 316, + 483, + 500 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.5. HUSTLab", + "text_level": 1, + "bbox": [ + 89, + 508, + 207, + 523 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.5.1. Proposed Method", + "text_level": 1, + "bbox": [ + 89, + 532, + 261, + 547 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The HUSTLab explores the usage of Qwen2.5VL, MM-GroundingDINO, and LLMDet for the open-source CD-FSOD. The proposed method can be divided into two distinct phases: 1) Obtaining text descriptions from the training set using the Qwen2.5VL model; 2) Selecting a base model, such as Grounding DINO or LLMDet, and fine-tuning it with CopyPaste data augmentation, followed by Adversarial Weight Perturbation (AWP) training to derive the final model and obtain test results. We observe that models like Grounding DINO possess robust object detection capabilities, and fine-tuning them with few-shot data significantly enhances detection performance in specific domains. Moreover, for training sets with limited samples, utilizing text descriptions generated by large-scale vision-language models proves highly effective.", + "bbox": [ + 89, + 551, + 482, + 779 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Text Description Generation with a Large VLM. In this phase, this team leverages Qwen2.5VL to generate detailed text descriptions for the limited samples in the training set, extracting text-modal information from the images [50]. Converting visual-modal information into text-modal information helps eliminate noise and condense semantic content. 
These detailed text descriptions are robust and will be fully utilized during the testing phase to enhance cross", + "bbox": [ + 89, + 780, + 483, + 901 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/cda92802f910d1d9c86fcb9690a9c013f2143055bf9564f0a4664b98cfef3300.jpg", + "image_caption": [ + "Figure 9. Team HUSTLab: overall framework of the proposed method." + ], + "image_footnote": [], + "bbox": [ + 514, + 90, + 874, + 181 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "domain few-shot object detection performance.", + "bbox": [ + 511, + 251, + 826, + 266 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/02bfe67dc0273f0b6bb1570a13e9e272e3fc2bb1f70285eea045d85d22cd74f4.jpg", + "image_caption": [ + "Figure 10. Team HUSTLab: text description generation [50]." + ], + "image_footnote": [], + "bbox": [ + 522, + 286, + 901, + 364 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Training Phase. In this stage, this team first selects an appropriate base model—either Grounding DINO[43] or LLMDet—based[11] on its compatibility with the dataset. Using the zero-shot capabilities of the chosen base model, this team generates pseudo-labels, which are combined with ground-truth labels during training to regularize the model under few-shot conditions. To fine-tune the base model, this team uses CopyPaste[17] data augmentation and Adversarial Weight Perturbation (AWP) techniques[79]. This approach strengthens the model's generalization and robustness, enabling it to effectively handle cross-domain few-shot object detection tasks.", + "bbox": [ + 511, + 407, + 906, + 589 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.5.2. 
Training Details", + "text_level": 1, + "bbox": [ + 511, + 599, + 671, + 614 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The model is fine-tuned on three datasets using the MM-GroundingDINO-Large implementation provided by MMDetection as the base object detection framework, with the aim of enhancing cross-domain detection capabilities. The performance largely depends on prompt design. Since part of the BERT-based text encoder is kept frozen during training, prompt quality plays a crucial role in boosting performance for certain object detection tasks. Prompts generated using Qwen2.5-VL are able to accurately describe the attribute features associated with abstract category names, thereby assisting the model in object localization and recognition. All experiments are conducted on $4 \\times$ NVIDIA RTX 3090 GPUs.", + "bbox": [ + 511, + 619, + 906, + 814 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.6. TongjiLab", + "text_level": 1, + "bbox": [ + 511, + 827, + 630, + 843 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.6.1. Proposed Method", + "text_level": 1, + "bbox": [ + 511, + 849, + 684, + 864 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The TongjiLab proposes ProtoDINO, an innovative approach for CD-FSOD under the open-set setting, building", + "bbox": [ + 511, + 869, + 905, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "upon GroundingDINO [43] as the baseline model. To improve the target classification performance of the baseline model, the CLIP model [22, 54] is employed to extract both local and global image features from a limited set of target domain samples. These features are subsequently used to construct support sets, which serve as the foundation for building local prototype and global prototype networks, respectively. 
In addition, a text prototype network is developed using the CLIP model. During the target detection phase, visual features are extracted from each image query using CLIP. The L2 distances between these visual features and the local prototypes, global prototypes, and text prototypes are then computed, with these distances serving as one of the metrics for target classification. Furthermore, a car-damage-detection model5, implemented as a vehicle appearance damage classification model based on the Vision Transformer (ViT), is incorporated. For the final target classification, matching probabilities derived from the GroundingDINO model, the car-damage-detection model, and the prototype networks [66] are weighted and combined to produce the overall classification metric.", + "bbox": [ + 89, + 90, + 483, + 407 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The framework of the proposed ProtoDINO is depicted in Fig. 11. Overall, ProtoDINO operates in two key stages: prototype construction and target detection.", + "bbox": [ + 89, + 409, + 483, + 455 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/8e3b6e9faa6f1d53069bf24196c367f48a6404b2c370382e50a39429b69ee961.jpg", + "image_caption": [ + "Figure 11. Team TongjiLab: framework of the proposed ProtoDINO." + ], + "image_footnote": [], + "bbox": [ + 91, + 468, + 480, + 651 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Prototype Construction. During the prototype construction phase, this team crops few-shot learning images based on their annotations and generates visual embeddings as local feature prototypes $c_{local}$ for these local patches using the CLIP model. For 5-shot and 10-shot settings, $c_{local}$ is computed as the mean of all visual embeddings within the same category. Similarly, global feature prototypes $c_{global}$ are derived by encoding entire images through CLIP and applying the same averaging strategy across categories. 
For each category text $t$ , this team builds the text prototype $c_{text}$ using CLIP as the text encoder.", + "bbox": [ + 89, + 709, + 483, + 875 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\nc _ {l o c a l} ^ {(n)} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} F _ {c r o p} ^ {(i)} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 638, + 103, + 906, + 142 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\nc _ {g l o b a l} ^ {(n)} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} F _ {i} ^ {(i)} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 638, + 148, + 906, + 188 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\nc _ {t e x t} ^ {(n)} = f _ {\\text {c l i p - t e x t}} \\left(t ^ {(n)}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 633, + 195, + 906, + 215 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Target Detection. In the target detection stage, the input image and target category texts are processed by GroundingDINO to generate bounding boxes and initial classification probabilities. These bounding boxes are used to crop local regions from the image, which are then encoded by CLIP to obtain their visual features $F_{crop}$ . To classify these regions, this team computes the L2 distances between their representations and the precomputed prototypes as in Eq. 4. These distances are transformed into probability distributions via a softmax operation, yielding the prototype network's classification output as in Eq. 5. Simultaneously, the cropped regions are evaluated by a pre-trained car-damage-detection model (based on Vision Transformer) to generate additional classification probabilities. The final classification decision is derived by aggregating probabilities from GroundingDINO, the car-damage-detection model, and the prototype network through a weighted summation as in Eq. 6. 
This fusion approach effectively integrates geometric localization from GroundingDINO, cross-modal semantics from CLIP, domain-specific insights from the car-damage-detection model, and few-shot prototype matching.", + "bbox": [ + 511, + 220, + 906, + 540 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\nd (u, v) = \\sqrt {\\sum_ {n} \\left(u ^ {n} - v ^ {n}\\right) ^ {2}} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 616, + 550, + 906, + 590 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\np r o b s _ {p r o t o} = - \\frac {1}{\\sigma} \\cdot e ^ {N o r m [ d (F, c) ]} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 597, + 906, + 626 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\np r o b s = \\sum_ {i} w _ {i} \\cdot p r o b s _ {i} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 625, + 633, + 905, + 664 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.6.2. Training Details", + "text_level": 1, + "bbox": [ + 513, + 670, + 671, + 685 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The implementation is carried out on a server running CentOS 7, equipped with a single RTX 6000 Ada GPU. For the CLIP model, the DFN5B-CLIP-ViT-H-14-378 implementation is selected due to its balance between performance and efficiency in processing visual and textual data. For the GroundingDINO model, the official implementation is used. Based on empirical observations, the threshold parameter $\\sigma$ is set to 0.5, which provides optimal results across various scenarios. In GroundingDINO, the bounding box confidence threshold (BOX_THRESHOLD) is set to 0.3. 
For the final decision fusion, the weighting coefficients for integrating outputs from multiple modules are empirically assigned as: $w_{\\mathrm{local}} = 0.25$ (local prototype network), $w_{\\mathrm{global}} = 0.15$ (global prototype network), $w_{\\mathrm{text}} = 0.4$ (text", + "bbox": [ + 511, + 688, + 906, + 902 + ], + "page_idx": 9 + }, + { + "type": "page_footnote", + "text": "5 https://huggingface.co/beingamit99/car_damage_detector/tree/main", + "bbox": [ + 107, + 886, + 475, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 924, + 509, + 936 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "prototype network), $w_{\\mathrm{dino}} = 0.1$ (GroundingDINO), and $w_{\\mathrm{car}} = 0.1$ (car-damage-detection model).", + "bbox": [ + 89, + 90, + 485, + 122 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.7. Manifold", + "text_level": 1, + "bbox": [ + 89, + 132, + 199, + 146 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.7.1. Proposed Method", + "text_level": 1, + "bbox": [ + 89, + 155, + 261, + 170 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "To address the challenge of few-shot object detection in cross-domain scenarios, the Manifold team proposes a novel approach based on the detection pipeline of a two-stage object detection algorithm. As illustrated in the Figure. 12, the proposed method first employs an open set object detection network, which is trained on public datasets, to detect objects in the query image. However, due to the domain gap between the pretraining datasets and the query datasets, the detection results cannot be directly trusted. Therefore, this team treats these results as region proposals that may contain objects of interest. 
Subsequently, this team combines the instance features from the support set for classification to obtain the final detection results.", + "bbox": [ + 88, + 174, + 483, + 369 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/e2f28966d29b1d9309d4b9cb28111ee3b3de5a35e827252d153c528d74e5800c.jpg", + "image_caption": [ + "Figure 12. Team Manifold: overall framework of GDPRE." + ], + "image_footnote": [], + "bbox": [ + 94, + 385, + 483, + 520 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "GroundingDINO-based Region Proposals. The GroundingDINO is selected as the pre-trained open-set object detector. It can detect objects of interest in images using input text, and it was pre-trained on seven datasets: COCO, O365, GoldG, Cap4M, OpenImage, ODinW-35, and RefCOCO. This pre-training gives it good detection capabilities for most real-world objects. However, in cross-domain few-shot scenarios, its detection effectiveness is suboptimal. For example, avocados may be misclassified as oranges because of the higher frequency of oranges in the pre-training data. Despite this, GroundingDINO can still provide region proposals for potential objects of interest in query images.", + "bbox": [ + 89, + 566, + 482, + 748 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "ResNet-based Feature Classification. After obtaining region proposals, this team classifies the objects within them using support set images. Given the limited samples and significant intra-class variations in image space, directly matching support instances with query candidates in this space yields poor results. ResNet pre-trained on ImageNet is used to extract image features, mapping instances to a more robust feature space. To address scale differences, this team resize instances in both support and region proposals images to $256 \\times 256$ for feature extraction. 
Considering", + "bbox": [ + 89, + 750, + 482, + 901 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "some classes have large intra-class and small inter-class differences, this team treats each instance's feature vector in multi-shot settings as a separate support vector rather than averaging them by class. This team calculates the cosine similarity between candidate region instances and support set instance feature vectors, assigning the region proposal instance to the class of the most similar support instance. This yields the final detection results, and the cosine similarity serves as the prediction confidence.", + "bbox": [ + 511, + 90, + 906, + 227 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.7.2. Implementation Details", + "text_level": 1, + "bbox": [ + 511, + 238, + 723, + 253 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Given that both GroundingDINO and ResNet are pretrained on large-scale datasets, fine-tuning them under few-shot constraints—where the training classes do not overlap with the test classes—can be challenging. As a result, the pre-trained model weights are kept frozen. This approach requires minimal computational resources and can be executed on a laptop equipped with an RTX 4060 GPU. During inference, the category names from the test dataset are used as prompt inputs for GroundingDINO, and the BOX_THRESHOLD is set to 0.1 to obtain the final detection results.", + "bbox": [ + 511, + 257, + 906, + 424 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.8.MXT", + "text_level": 1, + "bbox": [ + 511, + 436, + 591, + 450 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.8.1. Proposed Method", + "text_level": 1, + "bbox": [ + 511, + 460, + 683, + 474 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "This team proposes a Domain Adaptation Enhancement Module (DAEM) for Cross-Domain Few-Shot Object Detection (CD-FSOD), built as an extension to the CD-ViTO framework. 
While CD-ViTO provides a strong foundation for open-set cross-domain detection with DinoV2 ViT-L backbone, it still faces challenges with significant domain shifts. As illustrated in Fig 13, the DAEM integrates seamlessly with the DinoV2 ViT-L backbone and enhances domain adaptation through two complementary mechanisms: batch enhancement and feature alignment.", + "bbox": [ + 511, + 479, + 906, + 632 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/01810eb83ce91b149e958f580b0174e2285a8dc0d030ed2e1c8e83e917133bfe.jpg", + "image_caption": [ + "Figure 13. Team DAEM: overall of the proposed model." + ], + "image_footnote": [], + "bbox": [ + 517, + 646, + 906, + 811 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Batch Enhancement Module. The batch enhancement module increases training diversity through controlled style transfer between domains. For both source and target do", + "bbox": [ + 511, + 854, + 906, + 902 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "main images, this team introduces cross-domain characteristics while preserving semantic content:", + "bbox": [ + 89, + 90, + 482, + 122 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {i m g} _ {\\text {s t y l e d}} = \\sigma_ {t} \\cdot \\frac {\\operatorname {i m g} - \\mu_ {s}}{\\sigma_ {s}} + \\mu_ {t} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 138, + 482, + 170 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $\\mu_s, \\sigma_s$ are source image statistics and $\\mu_t, \\sigma_t$ are target domain statistics. The enhancement strength $\\alpha$ gradually increases during training as follows:", + "bbox": [ + 89, + 180, + 483, + 226 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\alpha = \\min (1. 
0, \\frac {t}{T _ {\\text {w a r m u p}}}) \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 241, + 482, + 273 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $t$ is the current iteration and $T_{warmup}$ is set to 500. This gradual adaptation prevents disrupting the pre-trained DinoV2 ViT-L features early in training.", + "bbox": [ + 89, + 284, + 483, + 329 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Feature Alignment Module. The feature alignment module employs two complementary strategies to reduce domain gaps: Maximum Mean Discrepancy (MMD) and style-based adaptation.", + "bbox": [ + 89, + 330, + 483, + 391 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "MMD Loss: The Maximum Mean Discrepancy is applied to reduce distribution differences between features from the source and target domains. MMD measures the distance between feature distributions in a reproducing kernel Hilbert space:", + "bbox": [ + 89, + 392, + 483, + 468 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {M M D} \\left(\\mathbf {X} _ {s}, \\mathbf {X} _ {t}\\right) = \\left\\| \\frac {1}{n _ {s}} \\sum_ {i = 1} ^ {n _ {s}} \\phi \\left(\\mathbf {x} _ {s} ^ {i}\\right) - \\frac {1}{n _ {t}} \\sum_ {j = 1} ^ {n _ {t}} \\phi \\left(\\mathbf {x} _ {t} ^ {j}\\right) \\right\\| _ {\\mathcal {H}} ^ {2} \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 104, + 496, + 482, + 559 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "This is implemented with multiple Gaussian kernels with bandwidths $\\sigma \\in \\{0.5, 1.0, 2.0, 5.0\\}$ to capture similarities at different feature scales. 
This approach guides DinoV2 ViT-L to preserve its powerful representation abilities while adapting to target domains with minimal samples.", + "bbox": [ + 89, + 561, + 483, + 637 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Style Loss: Style-based adaptation addresses visual variations between domains that are unrelated to object semantics. For feature maps $\\mathbf{F}$ , the channel-wise statistics is transformed as:", + "bbox": [ + 89, + 638, + 483, + 696 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {F}} = \\sigma_ {t} \\cdot \\frac {\\mathbf {F} - \\mu_ {s}}{\\sigma_ {s}} + \\mu_ {t} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 714, + 482, + 747 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $\\mu_s, \\sigma_s$ and $\\mu_t, \\sigma_t$ are the channel statistics of source and target features. This approach helps Di-noV2 ViT-L focus on domain-invariant object characteristics rather than domain-specific visual styles.", + "bbox": [ + 89, + 757, + 483, + 816 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The overall training objective combines the original CDViTO detection loss with the proposed domain adaptation components:", + "bbox": [ + 89, + 819, + 483, + 864 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {\\text {d e t}} + \\lambda_ {m m d} \\mathcal {L} _ {M M D} + \\lambda_ {\\text {s t y l e}} \\mathcal {L} _ {\\text {s t y l e}} \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 147, + 885, + 482, + 902 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.8.2. Training Details", + "text_level": 1, + "bbox": [ + 513, + 90, + 671, + 106 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Following the pretrain–finetune–test pipeline established in the CD-FSOD benchmark, the pretrained DinoV2 ViT-L backbone from CD-ViTO is utilized. 
During fine-tuning, the backbone and Region Proposal Network (RPN) are selectively frozen, while the Domain-Adaptive Enhancement Modules (DAEM) and ROI Heads are optimized. This strategy preserves the general representational power of DinoV2 ViT-L while allowing domain-specific components to adapt effectively.", + "bbox": [ + 511, + 109, + 906, + 244 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Training is conducted on NVIDIA A800 GPUs, with hyperparameters determined through extensive experimentation: the MMD loss weight is set to $\\lambda_{mmd} = 0.16$ , the style loss weight to $\\lambda_{style} = 0.12$ , and the batch enhancement strength to $\\alpha_{max} = 0.8$ . Differential learning rates are applied, using a multiplier of 2.0 for the DAEM modules and bias terms, with a base learning rate of $1 \\times 10^{-4}$ .", + "bbox": [ + 511, + 247, + 906, + 352 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A warm-up phase of 500 iterations is introduced to gradually increase adaptation strength. This helps stabilize early-stage training and prevents disruption of the pretrained DinoV2 ViT-L features. Optimization is performed using stochastic gradient descent (SGD) with a momentum of 0.9 and a weight decay of $1 \\times 10^{-4}$ . The model reaches optimal cross-domain performance after approximately 50 epochs. The proposed approach maintains the efficiency of CD-ViTO while delivering substantial improvements in challenging cross-domain few-shot detection scenarios.", + "bbox": [ + 511, + 354, + 908, + 503 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5. Special Closed-Source Track Methods", + "text_level": 1, + "bbox": [ + 511, + 518, + 856, + 537 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5.1. X-Few", + "text_level": 1, + "bbox": [ + 513, + 545, + 601, + 559 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5.1.1. 
Proposed Method", + "text_level": 1, + "bbox": [ + 511, + 566, + 684, + 582 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "To address the challenges of domain shift and category confusion arising from limited annotated data in CD-FSOD, the X-Few team proposes a novel domain adaptation strategy based on the Instance Feature Caching (IFC) mechanism. The framework of the proposed method is shown in Fig. 14, which is mainly built upon the CD-ViTO baseline. Code is made available $^{6}$ .", + "bbox": [ + 511, + 587, + 905, + 691 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Intuitively, the IFC module is proposed to construct a cache model that could store and dynamically retrieve discriminative instance-level features from the target domain, alleviating model degradation caused by cross-domain distribution discrepancy in the few-shot supervision situation. Specifically, the IFC mechanism facilitates knowledge transfer through prototype-based feature alignment and an attention-guided memory update strategy, enhancing the model's generalization capability in the data-scarce cross-domain scenario.", + "bbox": [ + 511, + 693, + 906, + 843 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Instance Feature Caching Construction. Given a support set $S$ comprising $N$ target categories, each consisting", + "bbox": [ + 511, + 845, + 905, + 876 + ], + "page_idx": 11 + }, + { + "type": "page_footnote", + "text": "$^{6}$ https://github.com/johnmaijer/X-Few-_CD-FSOD", + "bbox": [ + 529, + 886, + 888, + 900 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/3a8a5a344c8d7a48b7252170d1c6170ca1b9ad7c65a3ecb9a38a78a25b908a37.jpg", + "image_caption": [ + "Figure 14. Team X-Few: illustration of the proposed Instance Feature Caching (IFC)." 
+ ], + "image_footnote": [], + "bbox": [ + 93, + 88, + 482, + 215 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "of $K$ annotated instances, denoted as $I_{K}$ with their associating labels $L_{N}$ . For all $N \\times K$ support samples, the proposed method leverages a pre-trained DINoV2 ViT $f_{CM}$ to obtain the instance-level features $F_{train} \\in \\mathbf{R}^{NK \\times C}$ . Similarly, the ground-truth labels are also encoded into $N$ -dimensional one-hot vectors $L_{train} \\in \\mathbf{R}^{NK \\times N}$ :", + "bbox": [ + 89, + 275, + 483, + 367 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nF _ {t r a i n} = \\mathbf {f} _ {C M} \\left(I _ {K}\\right) \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 220, + 375, + 480, + 390 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {t r a i n}} = \\mathbf {O n e H o t} \\left(I _ {N}\\right) \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 397, + 480, + 412 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The feature extraction step is performed in an offline fashion to ensure persistent storage of high-quality feature representations for support set instances, thereby preserving discriminative semantic characteristics and spatial-aware contextual patterns in a memory-efficient manner. Then, these features and their corresponding label encodings are systematically cached to establish a comprehensive knowledge base that facilitates adaptive domain-aware detection while mitigating catastrophic forgetting.", + "bbox": [ + 89, + 416, + 483, + 551 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Instance Search. After constructing the instance feature caching, given a query image $\\mathcal{L}$ , the proposed method first feeds $\\mathcal{L}$ into both the Region Proposal Network and the Vision Transformer encoder to generate candidate regions and extract their deep features, respectively. 
These region proposals are then combined with the corresponding instance-level features in $\\mathcal{L}$ to derive a query vector $f_{test}$ for each candidate bounding box. Then, the proposed method achieves the most relevant instance feature lookup and finally calculate the adaptation representation $A \\times L_{train}$ for the target domain, where $\\mathbf{A} \\in \\mathbf{R}^{NK}$ is the affinity matrix between query vector and instance feature caching, being defined as:", + "bbox": [ + 89, + 551, + 483, + 748 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {A} = \\exp (- \\beta (1 - f _ {\\text {t e s t}} F _ {\\text {t r a i n}} ^ {T})) \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 755, + 480, + 773 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Ultimately, the domain adaptation representation is fed into the classification and regression branches of the original detection framework to calibrate prediction results from the open-set detector:", + "bbox": [ + 89, + 779, + 482, + 839 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "1. Classification Enhancement: The similarity distribution between query features and cached features is leveraged to refine confidence estimates for the target domain categories through contrastive alignment.", + "bbox": [ + 91, + 839, + 483, + 901 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "2. 
Localization Refinement: Retrieved instance localization priors are incorporated to constrain bounding box regression, thereby mitigating cross-domain localization biases caused by domain shifts.", + "bbox": [ + 511, + 90, + 903, + 151 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The above two strategies ensure that the detector adaptively aligns domain-invariant semantic representations while suppressing spurious correlations introduced by cross-domain discrepancies.", + "bbox": [ + 511, + 152, + 903, + 212 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5.1.2. Training Details", + "text_level": 1, + "bbox": [ + 511, + 220, + 671, + 234 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A single RTX A800 GPU is used for the experiments. The model is pre-trained on COCO and fine-tuned on novel support images. For the DeepFruit[60], Carpk[20], and CarDD[76], the specific hyper-parameters settings are shown in the Tab. 2. The tailored combination of learning rates and epoch schedules reflects a fine-grained tuning strategy to address domain heterogeneity across datasets, ensuring optimal trade-offs between generalization and task-specific optimization.", + "bbox": [ + 511, + 239, + 906, + 377 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/6fc03298da45892b97c900a30b15ac1c7bd6dd9d8d85b21db84865a5f8f20679.jpg", + "table_caption": [ + "Table 2. Team X-Few: the hyper-parameters settings." + ], + "table_footnote": [], + "table_body": "
hyperparameter/shotDeepFruit [60]Carpk [20]CarDD [76]
151015101510
Batch size161616161616161616
Initial lr1e-31e-31e-31e-41e-41e-41e-31e-31e-3
Epoch40100200408010040100200
", + "bbox": [ + 517, + 416, + 921, + 481 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5.2. MM", + "text_level": 1, + "bbox": [ + 511, + 507, + 584, + 522 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5.2.1. Proposed Method", + "text_level": 1, + "bbox": [ + 511, + 530, + 683, + 545 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The MM team proposes a novel DFE-ViT method for CD-FSOD, in the closed set setting, which only takes COCO as the source data and transfers the model to a novel target. As in Fig. 15, the proposed DFE-ViT method is built upon one open-set detector (DE-ViT) and finetuned using a few labeled instances from the target domain. New improvements include Instance Feature Enhancement, ROI Feature Enhancement.", + "bbox": [ + 511, + 549, + 906, + 669 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/0ccac6ed1ed6b29bebe32679af7dcabc465e116a0d16d870635d362ad7bd1b03.jpg", + "image_caption": [ + "Figure 15. Team MM: overall framework of the DFE-ViT." + ], + "image_footnote": [], + "bbox": [ + 517, + 685, + 901, + 808 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Specifically, given $S$ and $q$ as input, DFE-ViT follows a similar pipeline as DE-ViT to obtain instance features $F_{ins}$ , region proposals $R_{q}$ , visual features $F_{q}$ , and ROI features", + "bbox": [ + 511, + 854, + 905, + 902 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "$F_{q_{roi}}$ . 
However, different from directly using $F_{ins}$ to derive the class prototypes, an Instance Feature Enhancement module (IFE) and an ROI Feature Enhancement module (RFE) are proposed to enhance feature representation from both instance-level and ROI-level perspectives.", + "bbox": [ + 89, + 90, + 480, + 166 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The IFE module adopts a residual CBAM structure to refine $F_{ins}^{ob}$ , enabling the network to adaptively emphasize informative channels and spatial regions. To guide this attention process more explicitly, a dedicated CBAM loss $\\mathcal{L}_{cbam}$ is designed, which encourages the enhanced instance features to align with salient regions in both spatial and channel dimensions. Furthermore, to enhance semantic alignment, a class prototype enhancement mechanism is further incorporated where each object instance interacts with its corresponding class prototype via cross-attention, ensuring more discriminative and category-aware features. The output of IFE is optimized jointly with the standard detection losses, including the localization loss $\\mathcal{L}_{loc}$ , classification loss $\\mathcal{L}_{cls}$ , and the attention-guided loss $\\mathcal{L}_{cbam}$ .", + "bbox": [ + 88, + 167, + 482, + 378 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "For ROI features, this team introduces RFE based on a Variational Autoencoder (VAE). Each ROI feature $F_{q_{roi}}$ is encoded into a latent distribution and then reconstructed, which enables learning a more robust and expressive representation. A reconstruction loss $\\mathcal{L}_{vae}$ is employed to ensure fidelity and consistency in the learned latent space. 
This ROI-level enhancement complements the instance-level refinement, offering a more diversified and generalized feature representation.", + "bbox": [ + 89, + 378, + 482, + 513 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The top modules including the detection head $M_{DET}$ and the classification head $M_{CLS}$ are fine-tuned using the combined objective:", + "bbox": [ + 89, + 513, + 482, + 559 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {l o c} + \\mathcal {L} _ {c l s} + \\alpha * \\mathcal {L} _ {c b a m} + \\beta * \\mathcal {L} _ {v a e}. \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 130, + 570, + 480, + 585 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Instance Feature Enhancement. The IFE module aims to refine instance features by integrating spatial/channel attention and semantic guidance. Given input instance features $F_{ins} \\in \\mathbb{R}^{B \\times C \\times H \\times W}$ , it first applies a residual CBAM to obtain spatially and channel-refined features $F_{cbam}$ . Then, class prototypes $P \\in \\mathbb{R}^{N \\times C}$ are used to semantically enhance the instance features via a cross-attention mechanism. Specifically, query and key projections are computed as $Q = W_qF_{ins}$ and $K = W_kP$ , followed by attention: $A = \\text{softmax}(QK^\\top / \\sqrt{d})$ . The attended prototype features are added with a learnable weight $\\gamma$ , yielding $F_{proto}$ . The final enhanced features are computed as $F_{enh} = F_{cbam} + F_{proto}$ , which are more discriminative for downstream detection.", + "bbox": [ + 89, + 597, + 482, + 808 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "ROI Feature Enhancement. The RFE module is based on a Variational Autoencoder and class prototype computation. As shown in Fig. 
15, the orange modules represent the newly proposed contributions: using VAE to model ROI features and enriching them with class prototypes. Given input ROI features $x \\in \\mathbb{R}^{N \\times C \\times k \\times k}$ , VAE", + "bbox": [ + 89, + 810, + 482, + 901 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "encodes $x$ into latent mean $\\mu \\in \\mathbb{R}^{N \\times d}$ and log-variance $\\log \\sigma^2 \\in \\mathbb{R}^{N \\times d}$ through linear layers. Latent variables are sampled as $z = \\mu + \\sigma \\odot \\epsilon$ using the reparameterization trick. Then, $z$ is decoded to reconstruct the ROI features $\\hat{x} = \\mathrm{Decoder}(z)$ . The reconstruction loss is computed as $L_{\\text{recon}} = \\frac{1}{N} \\sum_{i=1}^{N} \\| \\hat{x}_i - x_i \\|^2$ , and the KL divergence loss regularizes the latent distribution: $L_{KL} = -\\frac{1}{2} \\sum_{i=1}^{N} (1 + \\log \\sigma_i^2 - \\mu_i^2 - \\sigma_i^2)$ . The total VAE loss is $L_{vae} = L_{\\text{recon}} + L_{KL}$ . Finally, class prototypes are computed to further enhance feature representation across categories.", + "bbox": [ + 511, + 90, + 903, + 258 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5.2.2. Training Details", + "text_level": 1, + "bbox": [ + 511, + 268, + 671, + 282 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The model is trained in the \"pretrain, finetune, and test\" pipeline. Specifically, the base DE-ViT model pretrained on COCO is taken, then the $M_{DET}$ , $M_{CLS}$ , $IFE$ and $RFE$ are tuned on novel support images $S$ using the loss as in Eq. 15. The hyperparameter $\\alpha$ temperature for $\\mathcal{L}_{cbam}$ , $\\beta$ temperature for $\\mathcal{L}_{vae}$ are set as 0.3, 0.4 for all the target datasets. While the value $N_{dom}$ means the number of virtual domains depending on the number of target classes $N$ , specifically, $N_{dom} = 2 * N$ . The hyperparameter Top-K ( $K$ ) in DE-ViT is set to 5. 
For datasets with the number of classes $N$ less than 5, $K$ is set to $N$ . The trainable parameters are finetuned on 1-shot around 80 epochs, and on 5/10-shot around 50 epochs. The SGD with a learning rate of 0.002 is used as the optimizer. Experiments are performed on four A6000 GPUs.", + "bbox": [ + 511, + 287, + 906, + 513 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5.3. FSV", + "text_level": 1, + "bbox": [ + 511, + 525, + 584, + 539 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5.3.1. Proposed Method", + "text_level": 1, + "bbox": [ + 511, + 547, + 683, + 561 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The FSV team proposes an enhancement to the prototype-based detection for the cross-domain few-shot object detection (CD-FSOD) challenge under the closed-source setting, based on the CD-ViTO baseline model, as shown in Figure 16. Based on observations of the existing approach, this team found that CD-FSOD faces three key challenges. First, few-shot learning inherently suffers from limited example diversity. Second, conventional binary masking treats all spatial locations within an object region equally, which fails to prioritize more discriminative central regions over potentially noisy boundary areas. Third, standard cosine similarity calculations between query features and prototypes lack proper calibration, resulting in suboptimal separability across domain shifts. To solve these three challenges, this team explores three techniques: (1) Support Set Data Augmentation, (2) Soft Mask-Based Prototype Aggregation, and (3) Temperature-Scaled Similarity Calibration.", + "bbox": [ + 511, + 566, + 903, + 824 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Support Set Data Augmentation. For the support images, the proposed approach constructs a stochastic augmentation function to increase the diversity of the samples. 
DINOv2 [48] is used as the feature extraction backbone for the augmented data, for its robust self-supervised learning capa", + "bbox": [ + 511, + 825, + 903, + 901 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/8e0a5ce6cbe5f1bc98968e9128663375e2bf4d8e17311e0087fd301456ee1c0b.jpg", + "image_caption": [ + "Figure 16. Team FSV: overview of the proposed method." + ], + "image_footnote": [], + "bbox": [ + 107, + 92, + 472, + 215 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "bilities and effective cross-domain transfer. The augmentation pipeline consists of a composition of transformations including Random Saturation, Random Contrast, Random Brightness, Random Flip, Random Rotation, Random Crop, Random Erasing, and Resize Shortest Edge.", + "bbox": [ + 89, + 256, + 482, + 330 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Soft Mask-Based Prototype Aggregation. To prioritize more discriminative central regions over potentially noisy boundary areas, the conventional binary masks are replaced by Gaussian soft masks to create soft spatial attention. Let $F_{ins} = \\{F_{ins}^{ob}, F_{ins}^{bg}\\}$ denote the extracted instance features and $M$ denote the binary mask of an instance. The soft mask could be defined $\\tilde{M}$ as: $\\tilde{M} = \\frac{G_{\\sigma}(M)}{\\max G_{\\sigma}(M)}$ , where $G_{\\sigma}$ is the Gaussian filter with standard deviation parameter $\\sigma$ . The extracted instance features for foreground objects $F_{ins}^{ob}$ are then weighted by the soft mask $\\tilde{M}$ , used as the initialization for learnable instance features.", + "bbox": [ + 89, + 332, + 483, + 500 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Temperature-Scaled Similarity Calibration. 
Finally, to calibrate image features to other domains, the proposed approach takes temperature scaling to make the final prototypes better match those in the new domain, which is a simple yet effective strategy to improve the discriminability of similarity scores. Let $F_{q_{roi}}$ denote the ROI features extracted from a query image using DINOv2. $F_{pro}$ denotes the prototype vector. The temperature scaling is applied during the cosine similarity computation as", + "bbox": [ + 89, + 501, + 483, + 638 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\ns _ {\\tau} = \\frac {F _ {q _ {r o i}} ^ {\\top} F _ {p r o}}{\\tau \\cdot \\| F _ {q _ {r o i}} \\| \\cdot \\| F _ {p r o} \\|}, \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 647, + 482, + 685 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $\\tau$ is a temperature parameter that controls the sharpness of the similarity distribution. By tuning the temperature parameter, the entropy of the output distribution can be better modulated.", + "bbox": [ + 89, + 693, + 483, + 753 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "5.3.2. Implementation Details", + "text_level": 1, + "bbox": [ + 89, + 760, + 302, + 776 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The training procedure utilizes only the provided few-shot datasets (1-shot, 5-shot, and 10-shot variants), without incorporating additional external data. The trainable parameters are finetuned for each testing dataset around 100 epochs. The training batch size is 16, with a base learning rate of 0.002. The parameter $\\sigma$ in Soft Mask-Based Prototype Aggregation is set to 2.0. 
The parameter $\\tau$ in Temperature-Scaled Similarity Calibration is set to 0.07.", + "bbox": [ + 89, + 779, + 483, + 900 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Experiments are performed on four NVIDIA A100 GPUs.", + "bbox": [ + 511, + 90, + 898, + 106 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "5.4. IPC", + "text_level": 1, + "bbox": [ + 513, + 114, + 581, + 130 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "5.4.1. Proposed Method", + "text_level": 1, + "bbox": [ + 513, + 138, + 684, + 152 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The IPC team utilizes CD-ViTO as the baseline, which is an improved version of the DE-ViT method, designed to enhance the cross-domain detection capability. To further mitigate performance degradation caused by cross-domain discrepancies and a very small number of test domain reference examples, this team was inspired by [59] to introduce a test-time adaptation algorithm during the inference phase.", + "bbox": [ + 511, + 156, + 905, + 263 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/3fc263e3ffbb96207506aa0a9167656cd36623edb2810e5480bf08198e1c4a2a.jpg", + "image_caption": [ + "Figure 17. Team IPC: overview of the proposed approach. The upper section represents the baseline CD-ViTO fine-tuning phase; the lower section represents the test-time adaptation (TTA) process. The TTA procedure operates without access to the original training data, updating the fine-tuned detector on a single testing image before making a prediction. Crucially, only the mask prediction module in CD-ViTO undergoes gradient updates during TTA iterations." + ], + "image_footnote": [], + "bbox": [ + 522, + 282, + 898, + 479 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/8c8332ca03b01b9b660825506ac66cda3019c014dd3bbd75b026f0da59b8569c.jpg", + "image_caption": [ + "Figure 18. 
Team IPC: by iteratively retaining proposals (yellow boxes $\\square$ ) with high confidence scores as pseudo labels (red boxes $\\square$ ), the model can effectively filter out most invalid detection boxes." + ], + "image_footnote": [], + "bbox": [ + 517, + 625, + 903, + 694 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "To be specific, the proposed approach employs an iterative process as shown in Fig 17. During each iteration $t$ (where $t \\in \\{1, \\dots, T\\}$ ), the existing detector $\\theta_{t-1}$ generates predictions $D_t = \\{(b_{t,i}, p_{t,i}) : \\forall i\\}$ for image $I$ , with $b_{t,i}$ representing the $i^{th}$ object's bounding box and $p_{t,i} \\in [0,1]^K$ denoting the class probability distribution across $K$ categories. The detection confidence $c_{t,i} \\in [0,1]$ is determined by the highest probability in $p_{t,i}$ , while the", + "bbox": [ + 511, + 779, + 906, + 902 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "corresponding class index gives the predicted object category $y_{t,i} \\in \\{1, \\dots, K\\}$ . Confident detections are then selected as pseudo-labels as illustrated in Fig 18: $P_t = \\{(b_{t,i}, y_{t,i}) : c_{t,i} > \\lambda_{conf}\\}$ , with $\\lambda_{conf}$ serving as the confidence cutoff. The detector is subsequently refined through gradient descent on these pseudo-labels, yielding an improved model $\\theta_t$ .", + "bbox": [ + 89, + 90, + 480, + 196 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For the initial iteration $(t = 1)$ , the detector $\\theta_{t - 1}$ is initialized as $\\theta_0$ , which was pre-trained on source domain data. Upon completion of the final iteration $(t = T)$ , the optimized model $\\theta_T$ produces the final predictions for $I$ . 
Notably, this self-training paradigm maintains the original network architecture and operates without requiring access to source data or any other pretrained foundation models during adaptation.", + "bbox": [ + 89, + 196, + 480, + 318 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "5.4.2. Training Details", + "text_level": 1, + "bbox": [ + 89, + 327, + 250, + 340 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A single NVIDIA A6000 GPU is used for all experiments. The proposed method extends the CD-ViTO baseline through a test-time adaptation pipeline, initialized with k-shot instance fine-tuning on novel support datasets. During inference, the proposed method processes each test image using momentum SGD ( $\\beta = 0.9$ , $\\alpha = 0.001$ ) to exclusively update the mask prediction module through 5 iterations. For all experimental datasets, the cut-off confidence threshold $\\lambda_{conf}$ is empirically set to 0.6.", + "bbox": [ + 89, + 345, + 482, + 482 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "5.5.LJY", + "text_level": 1, + "bbox": [ + 89, + 492, + 161, + 507 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "5.5.1. Proposed Method", + "text_level": 1, + "bbox": [ + 89, + 513, + 261, + 530 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "As shown in Fig. 19, the LJY team proposes similarity calibrated prototype refinement network, which utilizes query-aware guidelines to generate prototypes. The network contains a pretrained DINOv2 ViT, a region proposal network, an ROI align module, a detection head, and a one-vs-rest classification head. During the finetuning stage, the parameters of DINOv2 ViT are frozen. 
Only the parameters of the detection head and the classification head are finetuned.", + "bbox": [ + 89, + 534, + 482, + 654 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/a08ef0f9dc4809732e94f6a54a4e9fb6edf8cb2fe27dba8bc5508c95462c3ad5.jpg", + "image_caption": [ + "Figure 19. Team LJY: overall framework of SCPR." + ], + "image_footnote": [], + "bbox": [ + 96, + 666, + 480, + 835 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Given a query image $\\pmb{q} \\in \\mathbb{R}^{H \\times W \\times C}$ and a set of support images $S$ , where $H, W$ and $C$ stand for the num-", + "bbox": [ + 89, + 869, + 482, + 900 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "ber of height, width and channels, respectively, the DINOv2 ViT backbone is used for obtaining query patches $\\pmb{F}_{q} \\in \\mathbb{R}^{d}$ and support patches $\\pmb{F}_{s}$ . Then, two linear layers are applied to project the query patches $\\pmb{F}_{q}$ to $\\pmb{Q}$ and $\\pmb{K}_{1}$ and project the support patches $\\pmb{F}_{s}$ to $\\pmb{K}_{2}$ . The query patches $\\pmb{F}_{q}$ and the support patches $\\pmb{F}_{s}$ are then concatenated to obtain $\\pmb{F}_{cat} = \\text{Concat}(\\pmb{F}_{q}, \\pmb{F}_{s})$ . The concatenated patches $\\pmb{F}_{cat}$ are projected to obtain $\\pmb{V}$ . To align the query patches and the support patches, the proposed method conducts scaled dot product on query patches $\\pmb{F}_{q}$ and itself to obtain self attention score $A_{self} = \\frac{\\pmb{Q}\\pmb{K}_{1}^{\\top}}{\\sqrt{d}}$ . Meanwhile, cross-attention score is computed using cosine similarity to ensure scale invariance $A_{cross} = \\frac{\\pmb{Q}\\pmb{K}_{2}^{\\top}}{\\|\\pmb{Q}\\|_{2}\\|\\pmb{K}_{2}\\|_{2} + \\epsilon}$ where $\\epsilon$ is a small constant to avoid division by zero. 
The combined attention score is obtained by concatenating both and then be normalized by the softmax operation $A = \\text{Softmax}(\\text{Concat}(\\pmb{A}_{self}, \\pmb{A}_{cross}))$ . The refined query representation is obtained by applying attention weights to the value matrix $\\hat{\\pmb{F}}_{q} = \\pmb{F}_{q} + \\pmb{A}\\pmb{V}$ . With the aligned query patches, the proposed method then generates prototypes with query-perceptual information. To further calibrate support features, their cosine similarity with the refined query is computed: $Sim = \\text{Softmax}\\left(\\frac{\\pmb{F}_{s}\\pmb{F}_{q}^{\\top}}{\\|\\pmb{F}_{s}\\|_{2}\\|\\pmb{F}_{q}\\|_{2} + \\epsilon}\\right)$ . This similarity is used to re-weight the support representations: $\\hat{\\pmb{F}}_{s} = \\pmb{F}_{s} + Sim*\\hat{\\pmb{F}}_{q}$ . A learnable weighting function is applied via a sigmoid transformation: $W = Sigmoid(FC(\\hat{\\pmb{F}}_{s}))$ . Ensuring adaptive feature scaling: $\\hat{\\pmb{F}}_{s} = W\\cdot \\hat{\\pmb{F}}_{s}$ . The updated support features are then averaged across the K-shot dimension to derive refined prototypes: $P = \\frac{1}{K}\\sum_{i=1}^{K}\\hat{\\pmb{F}}_{s}$ . Finally, the query-aware prototype refinement is performed using a weighted combination of the refined prototypes and the original prototypes: $\\hat{\\pmb{P}} = \\alpha\\cdot\\pmb{P} + (1-\\alpha)\\cdot\\frac{1}{K}\\sum_{i=1}^{K}\\pmb{F}_{s}$ . This final prototype representation retains both source-domain knowledge and query-specific adaptability, effectively enhancing cross-domain few-shot detection performance.", + "bbox": [ + 511, + 90, + 906, + 652 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "5.5.2. Training Details", + "text_level": 1, + "bbox": [ + 511, + 659, + 671, + 674 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The proposed modules are fine-tuned on novel support images, with the base DE-ViT pretrained on COCO taken as initialization. 
The SGD with a learning rate of 0.002 is used as the optimizer. All experiments are conducted on two RTX3090 GPUs. The mAPs for 1/5/10 shots are reported.", + "bbox": [ + 511, + 678, + 905, + 753 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 511, + 767, + 671, + 784 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "INSAIT, Sofia University \"St. Kliment Ohridski\". Partially funded by the Ministry of Education and Science of Bulgaria's support for INSAIT as part of the Bulgarian National Roadmap for Research Infrastructure. This work was partially supported by the Humboldt Foundation. We thank the NTIRE 2025 sponsors: ByteDance, Meituan, Kuaishou, and University of Wurzburg (Computer Vision Lab).", + "bbox": [ + 511, + 791, + 905, + 898 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A. Teams and affiliations", + "text_level": 1, + "bbox": [ + 91, + 89, + 303, + 104 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "NTIRE 2025 team", + "text_level": 1, + "bbox": [ + 91, + 116, + 235, + 131 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Title: NTIRE 2025 Challenge on Cross-Domain Few-Shot Object Detection: Methods and Results.", + "bbox": [ + 89, + 140, + 482, + 171 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 91, + 171, + 163, + 183 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Yuqian Fu1 (yuqian.fu@insait.ai),", + "bbox": [ + 93, + 186, + 318, + 200 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Xingyu Qiu² (xyqiu24@m.fudan.edu.cn),", + "bbox": [ + 93, + 202, + 367, + 215 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Bin Ren $^{3,4}$ (bin.ren@unitn.it),", + "bbox": [ + 93, + 215, + 292, + 229 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": 
"Yanwei $\\mathrm{Fu}^2$ (yanweifu@fudan.edu.cn),", + "bbox": [ + 93, + 231, + 352, + 246 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Radu Timofte $^{5}$ (radu.timofte@uni-wuerzburg.de),", + "bbox": [ + 93, + 247, + 421, + 261 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Nicu Sebe4 (niculae.sebe@unitn.it),", + "bbox": [ + 93, + 262, + 331, + 276 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Ming-Hsuan Yang $^{6}$ (mhyang@ucmerced.edu),", + "bbox": [ + 93, + 277, + 400, + 291 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Luc Van Gool1 (luc.vangool@insait.ai)", + "bbox": [ + 93, + 292, + 351, + 306 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 91, + 306, + 174, + 321 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 INSAIT, Sofia University St. Kliment Ohridski, Bulgaria", + "$^{2}$ Fudan University, China", + "3 University of Pisa, Italy", + "4 University of Trento, Italy", + "5 Computer Vision Lab, University of Würzburg, Germany", + "6 University of California at Merced, United States" + ], + "bbox": [ + 93, + 321, + 478, + 411 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "MoveFree", + "text_level": 1, + "bbox": [ + 91, + 443, + 173, + 457 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Title: Marrying MoE-powered Grounding DINO with Self-training for Cross-domain Few-shot Object Detection", + "bbox": [ + 89, + 467, + 482, + 498 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 91, + 500, + 163, + 511 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Kaijin Zhang $^{1}$ (zhang.kaijin1@zte.com.cn),", + "bbox": [ + 93, + 513, + 380, + 527 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Qingpeng Nong1 (nong.qingpeng@zte.com.cn),", + "bbox": [ + 93, + 529, + 408, + 542 + ], + "page_idx": 16 + }, + { + 
"type": "text", + "text": "Xiugang Dong $^{1}$ (dong.xiugang20@zte.com.cn),", + "bbox": [ + 93, + 544, + 408, + 558 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Hong Gao $^{1}$ (gao.hong@zte.com.cn),", + "bbox": [ + 93, + 559, + 334, + 573 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Xiangsheng Zhou1 (zhou.xiangsheng@zte.com.cn)", + "bbox": [ + 93, + 574, + 429, + 589 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 91, + 589, + 174, + 603 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "1 Central R & D Institute, ZTE", + "bbox": [ + 93, + 604, + 297, + 617 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "AI4EarthLab", + "text_level": 1, + "bbox": [ + 91, + 648, + 200, + 664 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Title: Enhance Then Search: An Augmentation-Search Strategy with Foundation Models for Cross-Domain Few-Shot Object Detection", + "bbox": [ + 89, + 674, + 482, + 718 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 91, + 720, + 161, + 733 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Jiancheng Pan1 (jiancheng.pan_plus@gmail.com),", + "bbox": [ + 93, + 734, + 419, + 750 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Yanxing Liu $^{2}$ (liuyanxing21@mails.ucas.ac.cn),", + "bbox": [ + 93, + 750, + 405, + 763 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Xiao He $^{3}$ (xiaohewhu@163.com),", + "bbox": [ + 93, + 765, + 313, + 779 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Jiahao Li1 (lijiahao23@mails.tsinghua.edu.cn),", + "bbox": [ + 93, + 780, + 398, + 795 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Yuze Sun $^{1}$ (syz23@mails.tsinghua.edu.cn),", + "bbox": [ + 93, + 796, + 374, + 809 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Xiaomeng Huang $^{1}$ (hxm@tsinghua.edu.cn)", + "bbox": [ + 
93, + 810, + 375, + 825 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 91, + 825, + 174, + 839 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ Tsinghua University", + "$^{2}$ University of Chinese Academy of Sciences", + "$^{3}$ Wuhan University" + ], + "bbox": [ + 93, + 840, + 393, + 885 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "IDCFS", + "text_level": 1, + "bbox": [ + 514, + 90, + 573, + 104 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Title: Pseudo-Label Driven Vision-Language Grounding for Cross-Domain Few-Shot Object Detection", + "bbox": [ + 513, + 112, + 903, + 142 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 514, + 143, + 584, + 156 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Zhenyu Zhang $^{1}$ (m202273680@hust.edu.cn),", + "bbox": [ + 514, + 157, + 813, + 172 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Ran Ma1 (ranma@hust.edu.cn),", + "bbox": [ + 514, + 172, + 725, + 186 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Yuhan Liu1 (yuhan.liu@hust.edu.cn),", + "bbox": [ + 514, + 188, + 764, + 203 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Zijian Zhuang $^{1}$ (zhuangzj@hust.edu.cn),", + "bbox": [ + 514, + 204, + 785, + 218 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Shuai Yi $^{1}$ (yishuai@hust.edu.cn),", + "bbox": [ + 514, + 218, + 736, + 233 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Yixiong Zou1 (yixiongz@hust.edu.cn)", + "bbox": [ + 514, + 234, + 769, + 248 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 514, + 250, + 596, + 262 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "1 School of Computer Science and Technology, Huazhong University of Science and Technology", + "bbox": [ + 513, + 
263, + 903, + 294 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "FDUROILab_Lenovo", + "text_level": 1, + "bbox": [ + 514, + 316, + 683, + 332 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Title: Efficient Tuning and MLLM-Based Post Prcessing for CDFSOD", + "bbox": [ + 513, + 339, + 903, + 369 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 514, + 369, + 584, + 383 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Lingyi Hong1 (lyhong22@m.fudan.edu.cn),", + "bbox": [ + 514, + 385, + 805, + 400 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Mingxi Cheng1(mxchen24@m.fudan.edu.cn),", + "bbox": [ + 514, + 401, + 818, + 415 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Runze Li $^{2}$ (lirz7@lenovo.com),", + "bbox": [ + 514, + 416, + 720, + 430 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Xingdong Sheng $^{2}$ (shengxd1@lenovo.com),", + "bbox": [ + 514, + 431, + 803, + 445 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Wenqiang Zhang $^{1,3}$ (wqzhang@fudan.edu.cn)", + "bbox": [ + 514, + 446, + 815, + 460 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 514, + 462, + 596, + 474 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ Shanghai Key Lab of Intelligent Information Processing, School of Computer Science, Fudan University", + "2 Lenovo Research", + "3 Engineering Research Center of AI & Robotics, Ministry of Education, Academy for Engineering & Technology, Fudan University" + ], + "bbox": [ + 513, + 476, + 903, + 566 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "HUSTLab", + "text_level": 1, + "bbox": [ + 514, + 590, + 599, + 604 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Title: Prompt and Finetune Grounding DINO for Cross-Domain Few-shot Object Detection", + "bbox": [ + 513, + 612, + 
903, + 642 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 514, + 643, + 584, + 656 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Weisen Chen $^{1}$ (U202115027@hust.edu.cn),", + "bbox": [ + 514, + 657, + 803, + 672 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Yongxin Yan $^{1}$ (2585856499@qq.com),", + "bbox": [ + 514, + 674, + 769, + 688 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Xinguo Chen $^{2}$ (327715@whut.edu.cn),", + "bbox": [ + 514, + 688, + 771, + 703 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Yuanjie Shao $^{1}$ (shaoyuanjie@hust.edu.cn),", + "bbox": [ + 514, + 704, + 792, + 718 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Zhengrong Zuo $^{1}$ (zhrzuo@main.hust.edu.cn),", + "bbox": [ + 514, + 719, + 812, + 733 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Nong Sang $^{1}$ (nsang@hust.edu.cn)", + "bbox": [ + 514, + 734, + 736, + 750 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 514, + 763, + 596, + 777 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 School of Artificial Intelligence and Automation, Huazhong University of Science and Technology", + "$^{2}$ School of Information Engineering, Wuhan University of Technology" + ], + "bbox": [ + 514, + 779, + 903, + 839 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "TongjiLab", + "text_level": 1, + "bbox": [ + 514, + 848, + 599, + 864 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Title: ProtoDINO: Cross-Domain Few-Shot Object Detection via GroundingDINO and CLIP-Based Prototypes", + "bbox": [ + 513, + 869, + 903, + 901 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 
91, + 90, + 163, + 104 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Hao $\\mathbf{W}\\mathbf{u}^{1}$ (haowu@tongji.edu.cn),", + "bbox": [ + 91, + 106, + 318, + 119 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Haoran Sun", + "bbox": [ + 91, + 121, + 179, + 133 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 136, + 174, + 150 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "$^{1}$ Tongji University", + "bbox": [ + 93, + 151, + 220, + 166 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Manifold", + "text_level": 1, + "bbox": [ + 91, + 191, + 166, + 205 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Title: CDFSOD Challenge: Using Grounding-DINO Proposals and ResNet Embeddings", + "bbox": [ + 91, + 213, + 482, + 244 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 244, + 161, + 257 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Shuming Hu1 (hsm123@nudt.edu.cn),", + "bbox": [ + 91, + 258, + 346, + 273 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Yan Zhang1,", + "bbox": [ + 91, + 273, + 178, + 289 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Zhiguang Shi1,", + "bbox": [ + 91, + 290, + 194, + 304 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Yu Zhang1,", + "bbox": [ + 91, + 305, + 171, + 319 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Chao Chen1,", + "bbox": [ + 91, + 320, + 179, + 334 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Tao Wang", + "bbox": [ + 91, + 335, + 163, + 349 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 351, + 174, + 364 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "$^{1}$ National University of Defense Technology", + "bbox": [ + 93, + 364, + 390, + 380 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "MXT", + "text_level": 1, + "bbox": [ + 
91, + 405, + 137, + 419 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Title: Domain Adaptation Enhancement Module (DAEM) for Cross-Domain Few-Shot Object Detection", + "bbox": [ + 91, + 428, + 482, + 458 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 459, + 161, + 470 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Da Feng $^{1}$ (072108208@fzu.edu.cn),", + "bbox": [ + 91, + 473, + 333, + 487 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Linhai Zhuo $^{1}$ (534537916@qq.com),", + "bbox": [ + 91, + 488, + 339, + 503 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Ziming Lin", + "bbox": [ + 91, + 503, + 176, + 518 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 518, + 174, + 532 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "$^{1}$ Fuzhou University", + "bbox": [ + 93, + 534, + 227, + 549 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "X-Few", + "text_level": 1, + "bbox": [ + 91, + 574, + 145, + 587 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Title: IFC: Instance Feature Caching for Cross-Domain Few-Shot Object Detection", + "bbox": [ + 91, + 595, + 482, + 625 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 627, + 161, + 638 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Yali Huang $^{1}$ (hyl2024@gs.zzu.edu.cn),", + "bbox": [ + 91, + 641, + 352, + 656 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Jie Mei $^{1}$ (mj123123@gs.zzu.edu.cn),", + "bbox": [ + 91, + 657, + 339, + 672 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Yiming Yang1 (yangyim637@gmail.com),", + "bbox": [ + 91, + 672, + 372, + 686 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Mi Guo $^{1}$ (mimi987836724@gs.zzu.edu.cn),", + "bbox": [ + 91, + 686, + 383, + 700 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": 
"Mingyuan Jiu $^{1,2,3}$ (iemyjiu@zzu.edu.cn),", + "bbox": [ + 91, + 700, + 366, + 717 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Mingliang Xu $^{1,2,3}$ (iexumingliang@zzu.edu.cn)", + "bbox": [ + 91, + 717, + 408, + 732 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 733, + 174, + 746 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ School of Computer and Artificial Intelligence, Zhengzhou University", + "$^{2}$ Engineering Research Center of Intelligent Swarm Systems, Ministry of Education, Zhengzhou University", + "$^{3}$ National SuperComputing Center in Zhengzhou" + ], + "bbox": [ + 91, + 747, + 482, + 823 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "MM", + "text_level": 1, + "bbox": [ + 91, + 847, + 130, + 861 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Title: DFE-ViT: Dual Feature Enhancement Network for Cross-Domain Few-Shot Object Detection.", + "bbox": [ + 89, + 869, + 482, + 900 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 513, + 90, + 584, + 104 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Maomao Xiong $^{1}$ (202314866@mail.sdu.edu.cn),", + "bbox": [ + 513, + 106, + 838, + 121 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Qunshu Zhang $^{1}$ (202414859@mail.sdu.edu.cn),", + "bbox": [ + 513, + 122, + 826, + 136 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Xinyu Cao $^{1}$ (202414842@mail.sdu.edu.cn)", + "bbox": [ + 513, + 137, + 797, + 151 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 152, + 598, + 165 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "1 Shandong University", + "bbox": [ + 514, + 166, + 666, + 181 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "FSV", + "text_level": 1, + "bbox": [ + 514, + 205, + 552, + 219 + ], + 
"page_idx": 17 + }, + { + "type": "text", + "text": "Title: Enhanced Prototype-based Cross-domain Few-shot Object Detection", + "bbox": [ + 513, + 227, + 903, + 257 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 258, + 584, + 271 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Yuqing Yang1 (yyqyang101@gmail.com)", + "bbox": [ + 514, + 273, + 790, + 287 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 289, + 596, + 303 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "1 George Mason University", + "bbox": [ + 514, + 303, + 697, + 319 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "IPC", + "text_level": 1, + "bbox": [ + 514, + 343, + 550, + 356 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Title: Test-time Adaptation Strategy for Cross-Domain Few-Shot Object Detection", + "bbox": [ + 513, + 364, + 903, + 393 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 396, + 584, + 407 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Dianmo Sheng $^{1}$ (dmsheng@mail.ustc.edu.cn),", + "bbox": [ + 514, + 410, + 821, + 425 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Xuanpu Zhao1,", + "bbox": [ + 514, + 425, + 617, + 440 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Zhiyu Li1,", + "bbox": [ + 514, + 440, + 586, + 455 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Xuyang Ding", + "bbox": [ + 514, + 455, + 612, + 470 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 470, + 596, + 484 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "1 University of Science and Technology of China", + "bbox": [ + 514, + 486, + 839, + 501 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "LJY", + "text_level": 1, + "bbox": [ + 514, + 525, + 550, + 540 + ], + "page_idx": 17 + }, + { + "type": "text", + 
"text": "Title: Similarity-Calibrated Prototype Refinement for Cross-Domain Few-Shot Object Detection", + "bbox": [ + 513, + 547, + 903, + 577 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 579, + 584, + 590 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Wenqian Li (wenqianli.li@seu.edu.cn)", + "bbox": [ + 514, + 593, + 769, + 608 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 609, + 596, + 623 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Southeast University", + "bbox": [ + 514, + 625, + 653, + 638 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 90, + 187, + 104 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025.8, 9", + "[2] Weilin Cai, Juyong Jiang, Fan Wang, Jing Tang, Sunghun Kim, and Jiayi Huang. A survey on mixture of experts. arXiv preprint arXiv:2407.06204, 2024. 5", + "[3] Zheng Chen, Kai Liu, Jue Gong, Jingkai Wang, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. Ntire 2025 challenge on image super-resolution $(\\times 4)$ : Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[4] Zheng Chen, Jingkai Wang, Kai Liu, Jue Gong, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. Ntire 2025 challenge on real-world face restoration: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[5] Marcos Conde, Radu Timofte, et al. 
Ntire 2025 challenge on raw image restoration and super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[6] Marcos Conde, Radu Timofte, et al. Raw image reconstruction from RGB on smartphones. ntire 2025 challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[7] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies, volume 1 (long and short papers), pages 4171–4186, 2019. 7", + "[8] Geir Drange. Arthropod taxonomy orders object detection dataset. In https://doi.org/10.34740/kaggle/dsv/1240192, 2019.2", + "[9] Egor Ershov, Sergey Korchagin, Alexei Khalin, Artyom Panshin, Arseniy Terekhin, Ekaterina Zaychenkova, Georgiy Lobarev, Vsevolod Plokhotnyuk, Denis Abramov, Elisey Zhdanov, Sofia Dorogova, Yasin Mamedov, Nikola Banic, Georgii Perevozhikov, Radu Timofte, et al. Ntire 2025 challenge on night photography rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[10] William Fedus, Barret Zoph, and Noam Shazeer. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. Journal of Machine Learning Research, 23(120):1-39, 2022. 5", + "[11] Shenghao Fu, Qize Yang, Qijie Mo, Junkai Yan, Xihan Wei, Jingke Meng, Xiaohua Xie, and Wei-Shi Zheng. Llmdet: Learning strong open-vocabulary object detectors under the supervision of large language models. arXiv preprint arXiv:2501.18954, 2025. 8, 9", + "[12] Yuqian Fu, Yanwei Fu, and Yu-Gang Jiang. 
Meta-fdmixup: Cross-domain few-shot learning guided by labeled target" + ], + "bbox": [ + 93, + 114, + 483, + 900 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "data. In Proceedings of the 29th ACM international conference on multimedia, pages 5326-5334, 2021. 1", + "[13] Yuqian Fu, Yu Xie, Yanwei Fu, Jingjing Chen, and Yu-Gang Jiang. Me-d2n: Multi-expert domain decompositional network for cross-domain few-shot learning. In Proceedings of the 30th ACM international conference on multimedia, pages 6609-6617, 2022.", + "[14] Yuqian Fu, Yu Xie, Yanwei Fu, and Yu-Gang Jiang. Styleadv: Meta style adversarial training for cross-domain few-shot learning. In CVPR, 2023. 1", + "[15] Yuqian Fu, Yu Wang, Yixuan Pan, Lian Huai, Xingyu Qiu, Zeyu Shangguan, Tong Liu, Yanwei Fu, Luc Van Gool, and Xingqun Jiang. Cross-domain few-shot object detection via enhanced open-set object detector. In European Conference on Computer Vision, pages 247-264. Springer, 2024. 1, 2, 4, 6", + "[16] Yuqian Fu, Xingyu Qiu, Bin Ren Yanwei Fu, Radu Timofte, Nicu Sebe, Ming-Hsuan Yang, Luc Van Gool, et al. Ntire 2025 challenge on cross-domain few-shot object detection: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[17] Golnaz Ghiasi, Yin Cui, Aravind Srinivas, Rui Qian, Tsung-Yi Lin, Ekin D Cubuk, Quoc V Le, and Barret Zoph. Simple copy-paste is a strong data augmentation method for instance segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2918-2928, 2021. 9", + "[18] Yunhui Guo, Noel C Codella, Leonid Karlinsky, James V Codella, John R Smith, Kate Saenko, Tajana Rosing, and Rogerio Feris. A broader study of cross-domain few-shot learning. In Computer vision-ECCV 2020: 16th European conference, Glasgow, UK, August 23-28, 2020, proceedings, part XXVII 16, pages 124-141. Springer, 2020. 
1", + "[19] Shuhao Han, Haotian Fan, Fangyuan Kong, Wenjie Liao, Chunle Guo, Chongyi Li, Radu Timofte, et al. Ntire 2025 challenge on text to image generation model quality assessment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[20] Meng-Ru Hsieh, Yen-Liang Lin, and Winston H Hsu. Drone-based object counting by spatially regularized regional proposal network. In Proceedings of the IEEE international conference on computer vision, pages 4145-4153, 2017. 1, 2, 8, 9, 13", + "[21] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. *ICLR*, 1(2):3, 2022. 7", + "[22] Gabriel Ilharco, Mitchell Wortsman, Ross Wightman, Cade Gordon, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, Hongseok Namkoong, John Miller, Hannaneh Hajishirzi, Ali Farhadi, and Ludwig Schmidt. Openclip, 2021. 10", + "[23] Naoto Inoue, Ryosuke Furuta, Toshihiko Yamasaki, and Kiyoharu Aizawa. Cross-domain weakly-supervised object detection through progressive domain adaptation. In CVPR, 2018. 2" + ], + "bbox": [ + 516, + 92, + 906, + 898 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[24] Varun Jain, Zongwei Wu, Quan Zou, Louis Florentin, Henrik Turbell, Sandeep Siddhartha, Radu Timofte, et al. Ntire 2025 challenge on video quality enhancement for video conferencing: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[25] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. 
arXiv preprint arXiv:2401.04088, 2024. 5", + "[26] Lihao Jiang, Yi Wang, Qi Jia, Shengwei Xu, Yu Liu, Xin Fan, Haojie Li, Risheng Liu, Xinwei Xue, and Ruili Wang. Underwater species detection using channel sharpening attention. In ACM MM, 2021. 2", + "[27] Aishwarya Kamath, Mannat Singh, Yann LeCun, Gabriel Synnaeve, Ishan Misra, and Nicolas Carion. Mdetr-modulated detection for end-to-end multi-modal understanding. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1780-1790, 2021. 7, 8", + "[28] Mona Köhler, Markus Eisenbach, and Horst-Michael Gross. Few-shot object detection: A comprehensive survey. IEEE Transactions on Neural Networks and Learning Systems, 2023. 1", + "[29] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123:32-73, 2017. 7", + "[30] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In Advances in Neural Information Processing Systems, pages 1097-1105, 2012.", + "[31] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, et al. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. International journal of computer vision, 128(7):1956-1981, 2020. 7, 8", + "[32] Sangmin Lee, Eunpil Park, Angel Canelo, Hyunhee Park, Youngjo Kim, Hyungju Chun, Xin Jin, Chongyi Li, Chun-Le Guo, Radu Timofte, et al. Ntire 2025 challenge on efficient burst hdr and restoration: Datasets, methods, and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2", + "[33] Chunyuan Li, Haotian Liu, Liunian Li, Pengchuan Zhang, Jyoti Aneja, Jianwei Yang, Ping Jin, Houdong Hu, Zicheng Liu, Yong Jae Lee, et al. Elevater: A benchmark and toolkit for evaluating language-augmented visual models. Advances in Neural Information Processing Systems, 35:9287-9301, 2022. 8", + "[34] Ke Li, Gang Wan, Gong Cheng, Liqui Meng, and Junwei Han. Object detection in optical remote sensing images: A survey and a new benchmark. ISPRS, 2020. 2", + "[35] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu" + ], + "bbox": [ + 91, + 92, + 485, + 902 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language-image pre-training. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10965-10975, 2022. 4, 7", + "[36] Wei-Hong Li, Xialei Liu, and Hakan Bilen. Cross-domain few-shot learning with task-specific adapters. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7161-7170, 2022. 1", + "[37] Xin Li, Yeying Jin, Xin Jin, Zongwei Wu, Bingchen Li, Yufei Wang, Wenhan Yang, Yu Li, Zhibo Chen, Bihan Wen, Robby Tan, Radu Timofte, et al. Ntire 2025 challenge on day and night raindrop removal for dual-focused images: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[38] Xin Li, Xijun Wang, Bingchen Li, Kun Yuan, Yizhen Shao, Suhang Yao, Ming Sun, Chao Zhou, Radu Timofte, and Zhibo Chen. Ntire 2025 challenge on short-formUGC video quality assessment and enhancement: Kwaisr dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2", + "[39] Xin Li, Kun Yuan, Bingchen Li, Fengbin Guan, Yizhen Shao, Zihao Yu, Xijun Wang, Yiting Lu, Wei Luo, Suhang Yao, Ming Sun, Chao Zhou, Zhibo Chen, Radu Timofte, et al. Ntire 2025 challenge on short-formUGC video quality assessment and enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[40] Jie Liang, Radu Timofte, Qiaosi Yi, Zhengqiang Zhang, Shuaizheng Liu, Lingchen Sun, Rongyuan Wu, Xindong Zhang, Hui Zeng, Lei Zhang, et al. Ntire 2025 the 2nd restore any image model (RAIM) in the wild challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[41] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer vision-ECCV 2014: 13th European conference, zurich, Switzerland, September 6-12, 2014, proceedings, part v 13, pages 740-755. Springer, 2014. 1, 2, 8", + "[42] Aixin Liu, Bei Feng, Bin Wang, Bingxuan Wang, Bo Liu, Chenggang Zhao, Chengqi Dengr, Chong Ruan, Damai Dai, Daya Guo, et al. Deepseek-v2: A strong, economical, and efficient mixture-of-experts language model. arXiv preprint arXiv:2405.04434, 2024. 5", + "[43] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Qing Jiang, Chunyuan Li, Jianwei Yang, Hang Su, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. In European Conference on Computer Vision, pages 38-55. Springer, 2024. 4, 6, 7, 9, 10", + "[44] Xiaohong Liu, Xiongkuo Min, Qiang Hu, Xiaoyun Zhang, Jie Guo, et al. Ntire 2025 XGC quality assessment challenge: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + ], + "bbox": [ + 516, + 92, + 903, + 900 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[45] Xiaoning Liu, Zongwei Wu, Florin-Alexandru Vasluianu, Hailong Yan, Bin Ren, Yulun Zhang, Shuhang Gu, Le Zhang, Ce Zhu, Radu Timofte, et al. Ntire 2025 challenge on low light image enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[46] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 7, 9", + "[47] Alexander Neubeck and Luc Van Gool. Efficient nonmaximum suppression. In 18th international conference on pattern recognition (ICPR'06), pages 850-855. IEEE, 2006. 7", + "[48] Maxime Oquab, Timothee Darct, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 14", + "[49] Vicente Ordonez, Girish Kulkarni, and Tamara Berg. Im2text: Describing images using 1 million captioned photographs. Advances in neural information processing systems, 24, 2011. 7", + "[50] Hongpeng Pan, Shifeng Yi, Shouwei Yang, Lei Qi, Bing Hu, Yi Xu, and Yang Yang. The solution for cvpr2024 foundational few-shot object detection challenge. arXiv preprint arXiv:2406.12225, 2024. 9", + "[51] Jiancheng Pan, Yanxing Liu, Yuqian Fu, Muyuan Ma, Jiaohao Li, Danda Pani Paudel, Luc Van Gool, and Xiaomeng Huang. Locate anything on earth: Advancing open-vocabulary object detection for remote sensing community, 2024. 
6", + "[52] Jiancheng Pan, Muyuan Ma, Qing Ma, Cong Bai, and Shengyong Chen. Pir: Remote sensing image-text retrieval with prior instruction representation learning, 2024. 6", + "[53] Limeng Qiao, Yuxuan Zhao, Zhiyuan Li, Xi Qiu, Jianan Wu, and Chi Zhang. Defrcn: Decoupled faster r-cnn for few-shot object detection. In ICCV, 2021. 1", + "[54] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 10", + "[55] Bin Ren, Yahui Liu, Yue Song, Wei Bi, Rita Cucchiara, Nicu Sebe, and Wei Wang. Masked jigsaw puzzle: A versatile position embedding for vision transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20382-20391, 2023. 1", + "[56] Bin Ren, Yawei Li, Jingyun Liang, Rakesh Ranjan, Mengyuan Liu, Rita Cucchiara, Luc V Gool, Ming-Hsuan Yang, and Nicu Sebe. Sharing key semantics in transformer makes efficient image restoration. Advances in Neural Information Processing Systems, 37:7427-7463, 2024. 1", + "[57] Bin Ren, Hang Guo, Lei Sun, Zongwei Wu, Radu Timofte, Yawei Li, et al. The tenth nitre 2025 efficient superresolution challenge report. In Proceedings of the IEEE/CVF" + ], + "bbox": [ + 91, + 92, + 480, + 900 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[58] Tianhe Ren, Qing Jiang, Shilong Liu, Zhaoyang Zeng, Wenlong Liu, Han Gao, Hongjie Huang, Zhengyu Ma, Xiaoke Jiang, Yihao Chen, et al. Grounding dino 1.5: Advance the\" edge\" of open-set object detection. arXiv preprint arXiv:2405.10300, 2024. 4", + "[59] Xiaoqian Ruan and Wei Tang. Fully test-time adaptation for object detection. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1038-1047, 2024. 15", + "[60] Inkyu Sa, Zongyuan Ge, Feras Dayoub, Ben Upcroft, Tristan Perez, and Chris McCool. Deepfruits: A fruit detection system using deep neural networks. sensors, 16(8):1222, 2016. 1, 2, 8, 9, 13", + "[61] Nickolay Safonov, Alexey Bryntsev, Andrey Moskalenko, Dmitry Kulikov, Dmitriy Vatolin, Radu Timofte, et al. Ntire 2025 challenge on UGC video enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[62] Alzayat Saleh, Issam H Laradji, Dmitry A Konovalov, Michael Bradley, David Vazquez, and Marcus Sheaves. A realistic fish-habitat dataset to evaluate algorithms for underwater visual analysis. Scientific Reports, 2020. 2", + "[63] Zeyu Shangguan and Mohammad Rostami. Identification of novel classes for improving few-shot object detection. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3356-3366, 2023. 1", + "[64] Zeyu Shangguan and Mohammad Rostami. Improved region proposal network for enhanced few-shot object detection. Neural Networks, 180:106699, 2024. 1", + "[65] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 8430-8439, 2019. 8", + "[66] Jake Snell, Kevin Swersky, and Richard Zemel. Prototypical networks for few-shot learning. Advances in neural information processing systems, 30, 2017. 10", + "[67] Kechen Song and Yunhui Yan. A noise robust method based on completed local binary patterns for hot-rolled steel strip surface defects. Applied Surface Science, 2013. 2", + "[68] Bo Sun, Banghuai Li, Shengcai Cai, Ye Yuan, and Chi Zhang. Fsce: Few-shot object detection via contrastive proposal encoding. In CVPR, 2021. 
1", + "[69] Lei Sun, Andrea Alfarano, Peiqi Duan, Shaolin Su, Kaiwei Wang, Boxin Shi, Radu Timofte, Danda Pani Paudel, Luc Van Gool, et al. Ntire 2025 challenge on event-based image deblurring: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[70] Lei Sun, Hang Guo, Bin Ren, Luc Van Gool, Radu Timofte, Yawei Li, et al. The tenth nitre 2025 image denoising challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + ], + "bbox": [ + 516, + 92, + 906, + 900 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 924, + 506, + 936 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[71] Hao Tang, Chengcheng Yuan, Zechao Li, and Jinhui Tang. Learning attention-guided pyramidal features for few-shot fine-grained recognition. Pattern Recognition, 130:108792, 2022. 1", + "[72] Hung-Yu Tseng, Hsin-Ying Lee, Jia-Bin Huang, and Ming-Hsuan Yang. Cross-domain few-shot classification via learned feature-wise transformation. arXiv preprint arXiv:2001.08735, 2020. 1", + "[73] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Cailian Chen, Zongwei Wu, Radu Timofte, et al. Ntire 2025 image shadow removal challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[74] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Zongwei Wu, Radu Timofte, et al. Ntire 2025 ambient lighting normalization challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[75] Xin Wang, Thomas E Huang, Trevor Darrell, Joseph E Gonzalez, and Fisher Yu. Frustratingly simple few-shot object detection. arXiv preprint arXiv:2003.06957, 2020. 1", + "[76] Xinkuang Wang, Wenjing Li, and Zhongcheng Wu. 
Cardd: A new dataset for vision-based car damage detection. IEEE Transactions on Intelligent Transportation Systems, 24(7): 7202-7214, 2023. 1, 2, 9, 13", + "[77] Yingqian Wang, Zhengyu Liang, Fengyuan Zhang, Lvli Tian, Longguang Wang, Juncheng Li, Jungang Yang, Radu Timofte, Yulan Guo, et al. Ntire 2025 challenge on light field image super-resolution: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[78] Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chau-mond, Clement Delangue, Anthony Moi, Pierrick Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, et al. Huggingface's transformers: State-of-the-art natural language processing. arXiv preprint arXiv:1910.03771, 2019. 8", + "[79] Dongxian Wu, Shu-Tao Xia, and Yisen Wang. Adversarial weight perturbation helps robust generalization. Advances in neural information processing systems, 33:2958-2969, 2020. 9", + "[80] Fuzhao Xue, Zian Zheng, Yao Fu, Jinjie Ni, Zangwei Zheng, Wangchunshu Zhou, and Yang You. Openmoe: An early effort on open mixture-of-experts language models. arXiv preprint arXiv:2402.01739, 2024. 5", + "[81] Kangning Yang, Jie Cai, Ling Ouyang, Florin-Alexandru Vasluianu, Radu Timofte, Jiaming Ding, Huiming Sun, Lan Fu, Jinlong Li, Chiu Man Ho, Zibo Meng, et al. Ntire 2025 challenge on single image reflection removal in the wild: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[82] Pierluigi Zama Ramirez, Fabio Tosi, Luigi Di Stefano, Radu Timofte, Alex Costanzino, Matteo Poggi, Samuele Salti, Stefano Mattoccia, et al. Ntire 2025 challenge on hr depth from images of specular and transparent surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + ], + "bbox": [ + 91, + 92, + 480, + 898 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[83] Zican Zha, Hao Tang, Yunlian Sun, and Jinhui Tang. Boosting few-shot fine-grained recognition with background suppression and foreground alignment. IEEE Transactions on Circuits and Systems for Video Technology, 33(8):3947-3961, 2023. 1", + "[84] Ji Zhang, Jingkuan Song, Lianli Gao, and Hengtao Shen. Free-lunch for cross-domain few-shot learning: Style-aware episodic training with robust contrastive learning. In Proceedings of the 30th ACM international conference on multimedia, pages 2586-2594, 2022. 1", + "[85] Xinyu Zhang, Yuhan Liu, Yuting Wang, and Abdeslam Boularias. Detect everything with few examples. arXiv preprint arXiv:2309.12969, 2023. 1, 3", + "[86] Linhai Zhuo, Yuqian Fu, Jingjing Chen, Yixin Cao, and YuGang Jiang. Tgdm: Target guided dynamic mixup for cross-domain few-shot learning. In Proceedings of the 30th ACM International Conference on Multimedia, pages 6368-6376, 2022. 1", + "[87] Linhai Zhuo, Yuqian Fu, Jingjing Chen, Yixin Cao, and YuGang Jiang. Unified view empirical study for large pretrained model on cross-domain few-shot learning. ACM Transactions on Multimedia Computing, Communications and Applications, 20(9):1-18, 2024. 
1" + ], + "bbox": [ + 516, + 92, + 903, + 415 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 924, + 506, + 935 + ], + "page_idx": 21 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10685/2b7c0cf2-f712-45f3-86c5-afe1fcf3d48b_model.json b/data/2025/2504_10xxx/2504.10685/2b7c0cf2-f712-45f3-86c5-afe1fcf3d48b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..840de1c2de449a7775eba5f36649a3fe5be8ec9b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/2b7c0cf2-f712-45f3-86c5-afe1fcf3d48b_model.json @@ -0,0 +1,5854 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.263, + 0.061, + 0.706 + ], + "angle": 270, + "content": "arXiv:2504.10685v1 [cs.CV] 14 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.149, + 0.13, + 0.849, + 0.175 + ], + "angle": 0, + "content": "NTIRE 2025 Challenge on Cross-Domain Few-Shot Object Detection: Methods and Results" + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.205, + 0.867, + 0.402 + ], + "angle": 0, + "content": "Yuqian Fu\\* Xingyu Qiu\\* Bin Ren\\* Yanwei Fu\\* Radu Timofte\\* Nicu Sebe\\* Ming-Hsuan Yang\\* Luc Van Gool\\* Kaijin Zhang Qingpeng Nong Xiugang Dong Hong Gao Xiangsheng Zhou Jiancheng Pan Yanxing Liu Xiao He Jiahao Li Yuze Sun Xiaomeng Huang Zhenyu Zhang Ran Ma Yuhan Liu Zijian Zhuang Shuai Yi Yixiong Zou Lingyi Hong Mingxi Chen Runze Li Xingdong Sheng Wenqiang Zhang Weisen Chen Yongxin Yan Xinguo Chen Yuanjie Shao Zhengrong Zuo Nong Sang Hao Wu Haoran Sun Shuming Hu Yan Zhang Zhiguang Shi Yu Zhang Chao Chen Tao Wang Da Feng Linhai Zhuo Ziming Lin Yali Huang Jie Me Yiming Yang Mi Guo Mingyuan Jiu Mingliang Xu Maomao Xiong Qunshu Zhang Xinyu Cao Yuqing Yang Dianmo Sheng Xuanpu Zhao Zhiyu Li Xuyang Ding Wenqian Li" + }, + { + "type": "title", + "bbox": [ + 0.249, + 0.433, + 0.327, + 0.449 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.465, + 0.486, + 0.707 + ], 
+ "angle": 0, + "content": "Cross-Domain Few-Shot Object Detection (CD-FSOD) poses significant challenges to existing object detection and few-shot detection models when applied across domains. In conjunction with NTIRE 2025, we organized the 1st CD-FSOD Challenge, aiming to advance the performance of current object detectors on entirely novel target domains with only limited labeled data. The challenge attracted 152 registered participants, received submissions from 42 teams, and concluded with 13 teams making valid final submissions. Participants approached the task from diverse perspectives, proposing novel models that achieved new state-of-the-art (SOTA) results under both open-source and closed-source settings. In this report, we present an overview of the 1st NTIRE 2025 CD-FSOD Challenge, highlighting the proposed solutions and summarizing the results submitted by the participants." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.734, + 0.222, + 0.75 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.759, + 0.483, + 0.821 + ], + "angle": 0, + "content": "Few-shot object detection (FSOD) [28] aims at allowing models to detect novel objects using minimal labeled examples. While significant progress has been made, existing FSOD methods [53, 63, 64, 68, 75, 85] typically as" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.435, + 0.908, + 0.602 + ], + "angle": 0, + "content": "sume that the training (source) and testing (target) data are drawn from the same domain. However, this assumption rarely holds in real-world applications. For instance, a model trained on natural images such as those in MS-COCO [41] may face substantial challenges when applied to a novel domain like remote sensing imagery. 
This cross-domain few-shot learning (CD-FSL) problem has attracted considerable attention in the context of classification [12-14, 18, 36, 55, 56, 71, 72, 83, 84, 86, 87], whereas its extension to object detection—i.e., cross-domain few-shot object detection (CD-FSOD)—remains much less explored." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.607, + 0.909, + 0.759 + ], + "angle": 0, + "content": "Upon gaping at this gap, one recent work, CD-ViTO [15], reveals that the different object detection datasets exhibit various characters in style, inter-class variance (ICV), and indefinable boundaries (IB). To further investigate how these factors affect the CD-FSOD, CD-ViTO thus proposes a new benchmark which takes MS-COCO as the source domain and six distinct datasets with diverse style, ICV, IB as unseen targets. Results indicate that the prior detectors all fail to generalize to those targets when the domain gap issue is observed." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.765, + 0.909, + 0.903 + ], + "angle": 0, + "content": "To further promote the advances on CD-FSOD, we newly introduce three more unseen targets, DeepFruits [60], Carpk [20], and CarDD [76] as testbeds for the CD-FSOD detectors. Following the observations in CD-ViTO, these three targets have domains different from the source data, with varying styles, ICV, and IB. Furthermore, to maximally boost the performance of models, we define the task setting proposed in CD-ViTO as closed-source CD-FSOD, while further introducing the new open-source CD-FSOD" + }, + { + "type": "page_footnote", + "bbox": [ + 0.09, + 0.827, + 0.483, + 0.901 + ], + "angle": 0, + "content": "* Yuqian Fu, Xingyu Qiu, Bin Ren, Yanwei Fu, Radu Timofte, Nicu Sebe, Ming-Hsuan Yang, and Luc Van Gool are the NTIRE2025 challenge organizers. The other authors are participants in this challenge. Appendix A contains the authors' team names and affiliations. NTIRE2025 webpage: https://cvlai.net/ntire/2025/. 
Challenge Codes: https://github.com/lovelyqian/NTIRE2025_CDFSOD." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.182 + ], + "angle": 0, + "content": "setting. To be specific, the closed-source setting means the source data for model training is strictly limited, e.g., MS-COCO as in CD-ViTO; while the open-source setting relaxes this limitation and allows the participants to leverage diverse knowledge sources and foundation models to explore the upper bound on the target domains." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.183, + 0.482, + 0.379 + ], + "angle": 0, + "content": "In collaboration with the 2025 New Trends in Image Restoration and Enhancement (NTIRE 2025) Workshop, which is particularly interested in the model robustness under changing conditions, we present the 1st CD-FSOD Challenge. It features an open-source CD-FSOD as the main track and a closed-source CD-FSOD as a special track. For the closed-source track, MS-COCO serves as the sole source domain. The validation phase includes six target domains proposed in CD-ViTO. Three additional novel domains are used as the final test sets for both tracks. Mean Average Precision (mAP) is employed as the ranking metric. We believe this challenge will drive progress in the CD-FSOD field and foster meaningful algorithmic innovations." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.379, + 0.483, + 0.651 + ], + "angle": 0, + "content": "This challenge is one of the NTIRE \\(2025^{1}\\) Workshop associated challenges on: ambient lighting normalization [74], reflection removal in the wild [81], shadow removal [73], event-based image deblurring [69], image denoising [70], XGC quality assessment [44], UGC video enhancement [61], night photography rendering [9], image super-resolution (x4) [3], real-world face restoration [4], efficient super-resolution [57], HR depth estimation [82], efficient burst HDR and restoration [32], cross-domain few-shot object detection [16], short-form UGC video quality assessment and enhancement [38, 39], text to image generation model quality assessment [19], day and night raindrop removal for dual-focused images [37], video quality assessment for video conferencing [24], low light image enhancement [45], light field super-resolution [77], restore any image model (RAIM) in the wild [40], raw restoration and super-resolution [5], and raw reconstruction from RGB on smartphones [6]." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.664, + 0.402, + 0.681 + ], + "angle": 0, + "content": "2. NTIRE 2025 CD-FSOD Challenge" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.689, + 0.283, + 0.705 + ], + "angle": 0, + "content": "2.1. Challenge Overview" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.712, + 0.483, + 0.877 + ], + "angle": 0, + "content": "Our challenge aims to advance Cross-Domain Few-Shot Object Detection (CD-FSOD) — detecting objects under domain shifts with limited labeled data. We use six previously published target domains [15] as validation sets and introduce three newly constructed datasets for final testing. Beyond the dataset update, we introduce open-source CD-FSOD as a new setting, allowing participants to freely choose source datasets and pre-trained models to enhance generalization. Fig. 
1 illustrates both the predefined closed-source CD-FSOD and the new open-source CD-FSOD settings, along with the newly introduced target domains." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.091, + 0.695, + 0.106 + ], + "angle": 0, + "content": "2.2. Task Formulations" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.113, + 0.905, + 0.278 + ], + "angle": 0, + "content": "Closed-Source CD-FSOD. Given a source dataset \\(\\mathcal{D}_S\\) and a novel target dataset \\(\\mathcal{D}_T\\), the closed-source CD-FSOD track assumes that the source class set \\(\\mathcal{C}_S\\) and the target class set \\(\\mathcal{C}_T\\) are completely disjoint, i.e., \\(\\mathcal{C}_S \\cap \\mathcal{C}_T = \\emptyset\\). Additionally, the distributions of the source domain \\(\\mathcal{D}_S\\) and the target domain \\(\\mathcal{D}_T\\) are not identical. Participants are required to train models on \\(\\mathcal{D}_S\\) and test them on \\(\\mathcal{D}_T\\), where each class in \\(\\mathcal{C}_T\\) has only a few labeled examples. Usually, \\(\\mathcal{D}_S\\) is a single dataset, as in CD-ViTO [15]. We refer to this setting as closed-source CD-FSOD to differentiate it from the open-source variant." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.28, + 0.905, + 0.476 + ], + "angle": 0, + "content": "Open-Source CD-FSOD. In contrast to the closed-source setting where training data is strictly limited, the open-source CD-FSOD track is designed to leverage the capabilities of foundation models. Since these models are pretrained on large-scale and diverse datasets, it is practically hard to trace all the knowledge embedded within them. Hence, we refer to this setting as open-source. While the relaxed constraints on source data make it difficult to strictly ensure non-overlapping classes between the source and target data, the track still focuses on addressing the core challenges of domain shift and few-shot object detection. 
We believe this setting will significantly accelerate the development of CD-FSOD methods for real-world applications." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.477, + 0.905, + 0.551 + ], + "angle": 0, + "content": "In this challenge, the open-source CD-FSOD is designated as the main track, with awards presented to the top three teams. The closed-source CD-FSOD serves as the special track, with a single award granted to the top-performing team." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.552, + 0.905, + 0.657 + ], + "angle": 0, + "content": "\\(N\\)-way \\(K\\)-shot Protocol. We adopt the \\(N\\)-way \\(K\\)-shot evaluation protocol. For each novel class in the target class set \\(\\mathcal{C}_T\\), \\(K\\) labeled instances are provided, forming the support set \\(S\\). The remaining unlabeled instances constitute the query set \\(Q\\). Instances contained in the support set \\(S\\) are used to assist the model in recognizing and detecting the objects in \\(Q\\)." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.667, + 0.787, + 0.683 + ], + "angle": 0, + "content": "2.3. Challenge Phases and Datasets" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.689, + 0.905, + 0.764 + ], + "angle": 0, + "content": "This challenge involves one development stage and one testing stage. The source data \\(\\mathcal{D}_S\\) for both stages is the same, i.e., MS-COCO [41] for the closed-source track and unlimited data for the open-source track. While the testing data \\(\\mathcal{D}_T\\) is different." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.765, + 0.905, + 0.826 + ], + "angle": 0, + "content": "Development Stage: Datasets proposed in the CD-ViTO, including ArTaxOr [8], Clipart1K [23], DIOR [34], Deep-Fish [62], NEU-DET [67], and UODD [26] are taken as targets \\(\\mathcal{D}_T\\) during development stage." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.826, + 0.905, + 0.901 + ], + "angle": 0, + "content": "Testing Stage. 
Three previously unseen datasets (DeepFruits [60], Carpk [20], and CarDD [76]) are introduced and used as the targets \\(\\mathcal{D}_T\\) for the final testing phase. Note that the ground truth annotations for these query sets are held exclusively by the challenge organizers." + }, + { + "type": "page_footnote", + "bbox": [ + 0.109, + 0.887, + 0.378, + 0.9 + ], + "angle": 0, + "content": "1https://www.cvlai.net/ntire/2025/" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.092, + 0.095, + 0.905, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.286, + 0.908, + 0.318 + ], + "angle": 0, + "content": "Figure 1. Illustration of the challenge settings, including the closed-source and open-source CD-FSOD tracks. The three newly introduced target datasets used in the final testing phase are also shown." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.34, + 0.328, + 0.357 + ], + "angle": 0, + "content": "2.4. CD-ViTO Baseline Model" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.363, + 0.484, + 0.485 + ], + "angle": 0, + "content": "We take CD-ViTO, the current State-of-the-art (SOTA) method under the closed-source setting, as the baseline for this challenge. Briefly, CD-ViTO is built upon DE-ViT [85], an open-set detector, and fine-tuned using the support set. As in Fig. 2, modules in blue are inherited from DE-ViT, while modules in orange are newly proposed. New improvements include learnable instance features, instance reweighting, domain prompter, and finetuning." + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.497, + 0.484, + 0.632 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.107, + 0.644, + 0.462, + 0.66 + ], + "angle": 0, + "content": "Figure 2. Overall framework of CD-ViTO baseline method." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.675, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Intuitively, the learnable instance feature module is designed to enhance inter-class variance (ICV) among different target classes by making the initially fixed instance features learnable and optimizing them through supervised few-shot detection tasks on the target support set. The instance reweighting module further improves prototype quality by assigning higher weights to high-quality object instances—e.g., those with minimal indefinable boundary (IB). These weights are learned via a lightweight MLP and fully connected layer, as illustrated in the upper part of Fig. 2(b). The domain prompter module introduces learnable domain perturbations to simulate varying domain styles. These perturbations are applied to object prototypes, followed by a prototype consistency loss to ensure that the introduced perturbations do not affect the seman-" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.341, + 0.907, + 0.465 + ], + "angle": 0, + "content": "tic category of the prototypes. Simultaneously, a domain diversity loss encourages the generated domains to be sufficiently diverse. The lower part of Fig. 2(b) illustrates this mechanism. By injecting virtual domains and enforcing robustness against the induced perturbations, this strategy enhances the model's generalization under domain shifts. Finetuning is applied to the modules highlighted with fire icons in Fig. 2." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.484, + 0.704, + 0.5 + ], + "angle": 0, + "content": "2.5. Evaluation Protocol" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.511, + 0.907, + 0.648 + ], + "angle": 0, + "content": "The final score is measured based on the model's performance on the three datasets of the testing stage. For each dataset, we validate the models on three different few-shot settings: 1-shot, 5-shot, and 10-shot. 
This results in a total of nine mean Average Precision (mAP) scores: D1_1shot, D1_5shot, D1_10shot; D2_1shot, D2_5shot, D2_10shot; and D3_1shot, D3_5shot, D3_10shot. The D1, D2, D3 denote the Deep-Fruits, Carpk, and CarDD, respectively." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.651, + 0.907, + 0.682 + ], + "angle": 0, + "content": "The final ranking score is computed as a weighted average avg() of these scores:" + }, + { + "type": "equation", + "bbox": [ + 0.512, + 0.702, + 0.918, + 0.755 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\text {S c o r e} = 2 * \\text {a v g} (\\mathrm {D} 1 _ {-} 1 \\text {s h o t}, \\mathrm {D} 2 _ {-} 1 \\text {s h o t}, \\mathrm {D} 3 _ {-} 1 \\text {s h o t}) \\\\ + 1 * a v g (D 1 \\_ 5 s h o t, D 2 \\_ 5 s h o t, D 3 \\_ 5 s h o t) \\\\ + 1 * a v g (D 1. 1 0 s h o t, D 2. 1 0 s h o t, D 3. 1 0 s h o t) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Rationale for Weighted Scoring. We assign a higher weight \\((\\times 2)\\) to the 1-shot setting for two primary reasons: (1) Performance in the 1-shot scenario is generally lower than in the 5-shot and 10-shot settings due to the limited availability of labeled examples for adaptation; and (2) emphasizing 1-shot performance encourages the development of models that are more robust and effective in extremely low-data conditions." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.089, + 0.908, + 0.119 + ], + "angle": 0, + "content": "Table 1. Open-source and closed-source results on CD-FSOD. D1, D2, and D3 represent DeepFruits, CARPK, and CarDD, respectively. Mean Average Precision (mAP) on 1-shot, 5-shot, and 10-shot are reported. Teams achieving top results are highlighted." 
+ }, + { + "type": "table", + "bbox": [ + 0.095, + 0.128, + 0.907, + 0.344 + ], + "angle": 0, + "content": "
Main Open-Source Track
RankTeam NameScoreD1_1shotD1_5shotD1_10shotD2_1shotD2_5shotD2_10shotD3_1shotD3_5shotD3_10shot
1MoveFree231.0166.1864.5862.5760.4358.8959.0048.7549.2848.00
2AI4EarthLab215.9261.1965.4165.3559.1558.0559.0034.2143.8547.00
3IDCFS215.4863.3465.4164.7561.1460.4260.0032.3339.2443.00
4FDUROILab_Lenovo211.5561.2562.8964.6659.2459.2459.0035.1337.6340.00
5HUSTLab210.7863.7161.3257.1960.4260.4760.0031.0140.0943.00
6TongjiLab172.1442.3641.9041.7455.9555.9555.0031.4031.4031.00
7Manifold159.8632.0544.2844.2757.0657.0657.0018.7129.3432.00
8MXT108.2022.2640.5741.3421.1226.3430.2323.8128.0029.00
Special Closed-Source Track
RankTeam NameScoreD1_1shotD1_5shotD1_10shotD2_1shotD2_5shotD2_10shotD3_1shotD3_5shotD3_10shot
1X-Few125.9036.5846.9550.9823.0129.6828.0020.1129.6833.00
2MM117.3932.4745.2350.2318.8329.3628.0018.3129.1431.00
3FSV112.8131.2343.8949.3213.6926.0426.5919.7130.1633.17
4IPC105.6232.5847.1245.6413.4120.7713.0018.1829.9832.00
5LJY105.2833.5246.0445.3410.6811.4525.0018.3430.9432.00
/CD-ViTO Base [15]91.0027.9537.4243.586.7721.2824.0010.0726.4730.00
"
    },
    {
      "type": "title",
      "bbox": [
        0.091,
        0.366,
        0.267,
        0.384
      ],
      "angle": 0,
      "content": "3. Challenge Results"
    },
    {
      "type": "text",
      "bbox": [
        0.09,
        0.393,
        0.483,
        0.483
      ],
      "angle": 0,
      "content": "Among the 152 registered participants, 8 and 5 teams have participated in the final testing stage and submitted their results, codes, and factsheets. Table 1 summarizes the results of these methods. Detailed descriptions of the participants' solutions are provided in Sec. 4 and Sec. 5, each corresponding to a different track."
    },
    {
      "type": "text",
      "bbox": [
        0.09,
        0.487,
        0.483,
        0.606
      ],
      "angle": 0,
      "content": "Open-Source Track Results. In the open-source track, nearly all participating teams achieved strong performance with clear improvements over the provided CD-ViTO baseline. This highlights not only the effectiveness of their proposed methods but also the significance of introducing this new task setting. As observed, relaxing the strict limitation on the source data offers a substantial advantage in tackling the CD-FSOD task."
    },
    {
      "type": "text",
      "bbox": [
        0.09,
        0.61,
        0.483,
        0.685
      ],
      "angle": 0,
      "content": "Specifically, the teams MoveFree, AI4EarthLab, and IDCFS emerged as the top performers in this track, achieving scores of 231.01, 215.92, and 215.48, respectively—significantly surpassing the baseline and other teams under the same track."
    },
    {
      "type": "text",
      "bbox": [
        0.09,
        0.687,
        0.483,
        0.779
      ],
      "angle": 0,
      "content": "Closed-Source Track Results. The performance achieved by the closed-source track teams is generally lower than that of the open-source track. This is quite understandable considering that the closed-source track enforces stricter constraints. Nevertheless, the participants managed to improve the baseline method clearly."
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.781, + 0.483, + 0.902 + ], + "angle": 0, + "content": "In particular, the X-Few team stands out with a final score of 125.90, significantly outperforming other competitors. This shows that well-designed architectures and training strategies can still bring notable gains even without relying on large external models. Other teams in this track also delivered solid improvements. Their contributions are valuable in terms of enabling fair comparisons and emphasizing algorithmic annotations." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.366, + 0.831, + 0.383 + ], + "angle": 0, + "content": "4. Main Open-Source Track Methods" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.391, + 0.629, + 0.405 + ], + "angle": 0, + "content": "4.1. MoveFree" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.415, + 0.684, + 0.43 + ], + "angle": 0, + "content": "4.1.1. Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.434, + 0.907, + 0.617 + ], + "angle": 0, + "content": "Open-set object detectors, such as [35], [43], and [58], are designed to detect objects based on arbitrary text descriptions. These models are typically pre-trained on large-scale, well-annotated datasets, ensuring strong alignment between textual and visual modalities. As a result, they exhibit remarkable zero-shot capabilities, allowing them to recognize and localize unseen object categories based solely on textual prompts. Given the strong generalization ability of such open-set detectors, this team believes that they are inherently well-suited for cross-domain few-shot object detection, as their robust pre-trained representations can be effectively adapted to new domains with minimal supervision." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.617, + 0.909, + 0.782 + ], + "angle": 0, + "content": "Thus, the MoveFree team focuses on leveraging and enhancing pre-trained open-set object detectors for CD-FSOD during the fine-tuning stage. The proposed approach introduces three key improvements: (1) To address the issue of missing annotations, self-training is introduced to iteratively refine the training data, thereby enhancing fine-tuning performance. (2) A Mixture-of-Experts (MoE) architecture is integrated into the open-set object detector to improve adaptability and robustness in the few-shot setting. (3) A two-stage fine-tuning pipeline is designed carefully. Code is made available2." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.784, + 0.909, + 0.876 + ], + "angle": 0, + "content": "Self-training Paradigm. According to the definition of few-shot object detection in CD-ViTO[15], \\( K \\)-shot object detection refers to having \\( K \\) labeled instances in the training data, rather than \\( K \\) fully annotated images. This implies that instances of target categories may lack annotations in the provided training set." + }, + { + "type": "page_footnote", + "bbox": [ + 0.531, + 0.887, + 0.863, + 0.901 + ], + "angle": 0, + "content": "2https://github.com/KAIJINZ228/Few_Shot_GD" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.485, + 0.348 + ], + "angle": 0, + "content": "Upon careful investigation, this team identified that the issue of incomplete annotations is prominent across all three test datasets in this challenge. Drawing on their expertise in developing open-set object detectors, the team recognized that missing annotations for target categories can significantly degrade model performance. 
This degradation occurs because the loss function penalizes the model for correctly detecting unannotated objects, mistakenly treating them as false positives due to their absence in the ground truth labels. Therefore, this team employs a self-training strategy during the fine-tuning stage of Grounding DINO to iteratively refine the annotations in the training data. Specifically, Grounding DINO periodically generates predictions on the training set, which are then incorporated as additional annotations. This iterative process gradually improves the quality of the training data, ultimately leading to enhanced model performance." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.348, + 0.485, + 0.469 + ], + "angle": 0, + "content": "The substitution of the Mixture-of-Experts (MoE). In few-shot object detection, the availability of training data is highly limited. Therefore, maximizing the object detector's ability to extract supervision from this scarce data is crucial during the fine-tuning stage. In this challenge, beyond the few-shot constraint, the cross-domain setting further increases the difficulty, as detectors usually require additional supervision to effectively adapt to a new domain." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.469, + 0.484, + 0.59 + ], + "angle": 0, + "content": "The core concept of the MoE architecture is to enable different components (i.e., experts) of a model to specialize in different aspects of the data [2]. In recent years, MoE has gained popularity in multi-modal models, including Mistral [25] and DeepSeek-V2 [42]. A common application of MoE in such models is replacing the traditional feedforward network (FFN) with an MoE-based variant, as seen in Switch Transformer [10] and OpenMoe [80]." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.59, + 0.484, + 0.741 + ], + "angle": 0, + "content": "To maximize supervision and enable the model to learn effectively from the limited training data, this team integrates a Mixture-of-Experts (MoE) mechanism into Grounding DINO during the fine-tuning stage. The MoE framework allows different experts to specialize in distinct aspects of the data, facilitating the capture of more diverse and informative representations. It is hypothesized that this capability helps Grounding DINO better adapt to the target domain while making more efficient use of the available training data." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.741, + 0.484, + 0.817 + ], + "angle": 0, + "content": "In this team's approach, the MoE mechanism is incorporated into the feed-forward network (FFN) layers of Grounding DINO's Cross-Modality Decoder. As illustrated in Figure 3, the MoE architecture consists of one shared expert and three router-selected experts." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.822, + 0.251, + 0.837 + ], + "angle": 0, + "content": "4.1.2. Training Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.841, + 0.484, + 0.901 + ], + "angle": 0, + "content": "A two-stage fine-tuning pipeline is adopted to adapt Grounding DINO for cross-domain few-shot object detection. In the first stage, the standard Grounding DINO (without the MoE substitution) is fine-tuned on the training data," + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.128, + 0.889, + 0.327 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.577, + 0.333, + 0.882, + 0.5 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.527, + 0.907, + 0.556 + ], + "angle": 0, + "content": "Figure 3. Team MoveFree: an illustration of the substitution of MoE into Grounding DINO's decoder layers." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.583, + 0.907, + 0.628 + ], + "angle": 0, + "content": "with all parameters trainable except for the language encoder. In the second stage, the MoE architecture is introduced into the model." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.629, + 0.909, + 0.795 + ], + "angle": 0, + "content": "For the second stage, the model is initialized using the weights obtained from the first stage, excluding the MoE components. The shared expert within the MoE is initialized with weights from the first stage, while the three router-selected experts are initialized using the open-source pre-trained weights of Grounding DINO. This initialization strategy facilitates effective learning from limited training data while retaining knowledge acquired during the initial stage. During this phase, only the MoE components and the detection head remain trainable, with all other parts of the model kept frozen." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.796, + 0.909, + 0.9 + ], + "angle": 0, + "content": "Additionally, the self-supervised learning paradigm is applied in both stages to iteratively refine the training data and enhance performance. The training strictly adheres to the provided few-shot training set, without utilizing any external data. The overall approach is computationally efficient and can be executed on a single V100 GPU within a reasonable time frame." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.091, + 0.091, + 0.234, + 0.105 + ], + "angle": 0, + "content": "4.2.AI4EarthLab" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.113, + 0.262, + 0.128 + ], + "angle": 0, + "content": "4.2.1. 
Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.132, + 0.483, + 0.222 + ], + "angle": 0, + "content": "Foundation models pretrained on large-scale datasets, such as GroundingDINO [43] and LAE-DINO [51], have demonstrated strong detection performance in cross-domain zero-shot and few-shot object detection tasks. Thus, the AI4EarthLab team is motivated to explore such foundation models for CD-FSOD." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.224, + 0.484, + 0.343 + ], + "angle": 0, + "content": "As shown in Fig. 4, this team proposes an augmentation-search strategy for CD-FSOD, which leverages open-source data and transfers the model to novel target domains. Following the approaches in [15, 52], an efficient fine-tuning method is adopted to explore the cross-domain few-shot detection capabilities of foundation models, requiring only lightweight tuning to identify effective subfields. Code is made available3." + }, + { + "type": "image", + "bbox": [ + 0.093, + 0.356, + 0.482, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.484, + 0.484, + 0.525 + ], + "angle": 0, + "content": "Figure 4. Team AI4EarthLab: overall framework of augmentation-search strategy Enhance Then Search (ETS) with foundation model for CD-FSOD." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.546, + 0.483, + 0.788 + ], + "angle": 0, + "content": "Data augmentation has proven effective in reducing semantic confusion during few-shot fine-tuning, particularly in cases where categories—such as certain fruits—are visually and semantically similar. Through extensive few-shot experiments, it is observed that integrating image-based augmentation with optimal domain search strategies can further enhance the performance of foundation models, though their upper performance bound remains uncertain. 
Building upon the open-source Grounding DINO framework, several commonly used image augmentation techniques are incorporated, and specific optimization objectives are defined to efficiently search for optimal subdomains within a broad domain space. This strategy facilitates more effective few-shot object detection. The proposed augmentation-search strategy consists of the following steps:" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.787, + 0.483, + 0.879 + ], + "angle": 0, + "content": "Step 1: Select the foundation model. This team adopts the Swin-B version of GroundingDINO as the foundation model, because of its best performance within the open-source model. This model has been pre-trained on a diverse set of large-scale datasets, including COCO, Objects365 (O365), GoldG, Cap4M, OpenImages, ODinW-35," + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.136 + ], + "angle": 0, + "content": "and RefCOCO, which collectively provide strong generalization capabilities across multiple vision-language grounding tasks." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.137, + 0.907, + 0.333 + ], + "angle": 0, + "content": "Step 2: Build a combined image augmentation pipeline. To improve the model's adaptability to various subdomains under limited data scenarios, this team construct a composite image augmentation pipeline. This pipeline randomly applies a combination of augmentation techniques such as CachedMosaic, YOLOXHSVRandomAug, RandomFlip, CachedMixUp, RandomResize, and RandomCrop. These methods are designed to enhance sample diversity, simulate domain shifts, and improve the model's robustness during fine-tuning. Additional data augmentation techniques, such as Copy-Paste, are also evaluated. However, these methods are found to introduce greater instability during few-shot fine-tuning." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.334, + 0.906, + 0.438 + ], + "angle": 0, + "content": "Step 3: Construct an optimized target domain validation set. To evaluate adaptation performance, a subset of the annotated test data is sampled and used as a validation set. Rather than employing full annotations, coarse-grained labeling is applied to provide sufficient supervision for hyperparameter tuning, while significantly reducing annotation costs in the target domain." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.439, + 0.906, + 0.53 + ], + "angle": 0, + "content": "Step 4: Search for the best model parameters on the validation set. Hyperparameter search and model selection are conducted based on validation performance. This process involves tuning the learning rate, augmentation intensity, and other training configurations to determine the optimal setup for effective domain adaptation." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.53, + 0.905, + 0.59 + ], + "angle": 0, + "content": "Step 5: Perform inference on the test set. Once the optimal configuration is identified, the fine-tuned model is applied to the held-out test set to evaluate its final performance on the target domain." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.596, + 0.673, + 0.61 + ], + "angle": 0, + "content": "4.2.2. Training Details" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.614, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Experiments are conducted on eight NVIDIA A100 GPUs, executing \\(8 \\times 50\\) experiment groups per round. During training, the optimal step size is selected based on historical performance to accelerate the fine-tuning process. Learning rate schedules are adjusted using milestone epochs, typically set to 1, 5, and 9 depending on the fine-tuning setting. The model uses 900 queries by default and a maximum text token length of 256. A BERT-based text encoder with BPE tokenization is employed. 
Both the feature enhancer and cross-modality decoder consist of six layers, and deformable attention is adopted in the image cross-attention modules. The loss function comprises classification (or contrastive) loss, box L1 loss, and GIoU loss. Following the Grounding DINO framework, Hungarian matching weights are set to 2.0 (classification), 5.0 (L1), and 2.0 (GIoU), while the final loss weights are 1.0, 5.0, and 2.0, respectively. Although various hyperparameter configurations are also explored, their impact is found to be relatively minor compared to that of data augmentation strategies." + }, + { + "type": "page_footnote", + "bbox": [ + 0.108, + 0.887, + 0.378, + 0.9 + ], + "angle": 0, + "content": "3https://github.com/jaychempan/ETS" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.505, + 0.937 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.091, + 0.091, + 0.182, + 0.105 + ], + "angle": 0, + "content": "4.3. IDCFS" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.113, + 0.262, + 0.128 + ], + "angle": 0, + "content": "4.3.1. Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.132, + 0.484, + 0.345 + ], + "angle": 0, + "content": "The IDCFS team proposes a Pseudo-Label Driven Vision-Language Grounding method for CD-FSOD. As shown in Figure 5, the proposed method mainly combines large-scale foundation models with an iterative pseudo-labeling strategy. The GLIP [35] is being fine-tuned using three approaches, with the full model fine-tuned delivering the best results in most cases. To better exploit the support set, an iterative training strategy is proposed and applied, using high-confidence predictions as pseudo-labels to refine the model. Additionally, this team also fine-tunes Grounding DINO [43] with LoRA [21], efficiently modifying the attention layers while freezing the base model. Finally, the model ensemble with confidence-reweighted NMS is further adopted to boost accuracy. 
Code is made available4." + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.359, + 0.483, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.53, + 0.483, + 0.559 + ], + "angle": 0, + "content": "Figure 5. Team IDCFS: overview of the proposed Pseudo-Label Driven Vision-Language Grounding for CD-FSOD." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.575, + 0.483, + 0.801 + ], + "angle": 0, + "content": "Fine-tuning on GLIP. Foundation models pretrained on large-scale datasets, such as GLIP [35], have demonstrated strong performance in zero-shot and few-shot object detection tasks. The proposed method is based on the GLIP-L model, which has been pretrained on several datasets including FourODs, GoldG, CC3M+12M, and SBU. For downstream tasks, this team tried three ways to fine-tune GLIP: 1) Full Model Fine-Tuning: fine-tune all parameters of the GLIP-L model using a relatively small learning rate \\((\\mathrm{lr} = 2\\mathrm{e} - 5)\\). 2) Prompt Tuning V1: fine-tune only the parameters of the text branch. 3) Prompt Tuning V2: This mode performs traditional prompt tuning by applying a linear layer to map the extracted text features. Experiments show that Full Model Fine-Tuning generally achieves the best fine-tuning performance in most cases." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.802, + 0.484, + 0.879 + ], + "angle": 0, + "content": "Iterative Training. Given the scarcity, high cost, and limited availability of annotated data in few-shot learning scenarios, this team also designed an iterative training approach to train the model, as shown in Figure 6. Specifically, the proposed method first fine-tunes the model for" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.907, + 0.228 + ], + "angle": 0, + "content": "a few steps using the available labeled data. 
Then, the fine-tuned model is used to predict the support set samples, selecting the predictions with high confidence as pseudolabels to update the label information of the support set samples. The model is then fine-tuned again. By iterating this process, the proposed method fully utilizes the information in the support set samples, achieving better performance while ensuring the robustness of the model, making it less susceptible to the influence of low-quality labels." + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.244, + 0.907, + 0.383 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.391, + 0.904, + 0.407 + ], + "angle": 0, + "content": "Figure 6. Team IDCFS: overview of the iterative training process." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.421, + 0.907, + 0.587 + ], + "angle": 0, + "content": "Fine-tuning Grounding DINO with LoRA. The IDCFS team also uses Grounding DINO [43] as another foundation model to generate bounding boxes and classification probabilities. The LoRA [21] is used to fine-tune GroundingDINO on the few-shot training set. Specifically, this team adds bypass adapters to the linear projection layers (i.e., query, key, and value) of the attention mechanism in the visual backbone and BERT of Grounding DINO. To facilitate better adaptation to cross-domain datasets, the original model weights are frozen, and only the newly added parameters are trained." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.588, + 0.909, + 0.755 + ], + "angle": 0, + "content": "Model Ensemble. To effectively combine the outputs of GLIP and Grounding DINO, a model ensemble strategy with confidence reweighting is employed. Specifically, the detection scores from each model are scaled by predefined reliability weights. The reweighted predictions are then merged and refined using Non-Maximum Suppression (NMS) [47] to eliminate redundant bounding boxes and produce the final fused results. 
This approach allows the more reliable model to have a greater influence on the final predictions, enhancing detection performance by leveraging the complementary strengths of both models." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.761, + 0.673, + 0.776 + ], + "angle": 0, + "content": "4.3.2. Training Details" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.909, + 0.903 + ], + "angle": 0, + "content": "For GLIP fine-tuning, the GLIP-L variant is used, which incorporates Swin-L [46] as the visual encoder and BERT [7] as the text encoder. The model is pre-trained on a variety of datasets, including FourODs [29-31], GoldG [27], CC3M+12M, and SBU [49]. During fine-tuning, full-model training is applied with a reduced learning rate of 2e-5, compared to the original setting of 1e-4 in GLIP. For Grounding DINO, the Swin-B [46] backbone is used as the vi" + }, + { + "type": "page_footnote", + "bbox": [ + 0.109, + 0.887, + 0.442, + 0.901 + ], + "angle": 0, + "content": "4https://github.com/Pumpkinder/GLIP-CDFSOD" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.485, + 0.226 + ], + "angle": 0, + "content": "sual encoder and BERT from Hugging Face [78] as the text encoder. The model is pre-trained on COCO [41], Objects365 [65], GoldG [27], Cap4M, OpenImages [31], ODinW-35 [33], and RefCOCO [27]. For the 1-shot and 5-shot settings on the CARPK dataset [20], no fine-tuning is performed. For 1-shot training on DeepFruits [60], only the backbone is fine-tuned using LoRA. In all other cases, LoRA is used to fine-tune both the backbone and the BERT text encoder." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.239, + 0.292, + 0.253 + ], + "angle": 0, + "content": "4.4. FDUROILab_Lenovo" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.261, + 0.262, + 0.276 + ], + "angle": 0, + "content": "4.4.1. 
Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.28, + 0.483, + 0.386 + ], + "angle": 0, + "content": "Efficient Tuning. To enhance the model's adaptability in cross-domain few-shot detection (CDFSOD), this team proposes an efficient fine-tuning strategy. The proposed approach leverages data augmentation techniques to expand the training set and improve the model's ability to recognize objects in the target domain with proposed k-shot annotated samples." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.387, + 0.484, + 0.448 + ], + "angle": 0, + "content": "Specifically, given a k-shot setting, where \\( \\mathbf{k} \\) represents the number of provided object samples, the proposed approach adopts a structured fine-tuning pipeline, which is shown in Figure 7." + }, + { + "type": "image", + "bbox": [ + 0.092, + 0.46, + 0.482, + 0.572 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.584, + 0.483, + 0.612 + ], + "angle": 0, + "content": "Figure 7. Team FDUROILab_Lenovo: overview of the efficient tuning and inference." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.629, + 0.484, + 0.901 + ], + "angle": 0, + "content": "(1) Object Cropping and Augmentation. Using the provided bounding boxes of k-shot examples, the proposed method first crops the target objects from the original images. The cropped objects are then subjected to various data augmentation techniques, including flipping, rotation, grayscale conversion, and other transformations, to introduce diversity and improve generalization. (2) Object Rescaling and Random Pasting. The proposed method randomly rescales the augmented objects to different sizes and pastes these transformed objects to the original images at different locations. This step simulates new object placements and enhances the model's robustness to variations in object appearance and context. (3) Fine-Tuning with Augmented Data. 
The proposed method finetunes the open-vocabulary detection model with the augmented images. This enables the detector to better adapt to objects in the target domain, even with minimal labeled examples. Additionally, the augmented data effectively increases the number of" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.906, + 0.168 + ], + "angle": 0, + "content": "training samples, mitigating the few-shot learning limitation and improving overall detection performance. Through this efficient fine-tuning approach, the finetuned model gains enhanced adaptability to new domains while maintaining the advantages of open-vocabulary detection." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.17, + 0.907, + 0.32 + ], + "angle": 0, + "content": "Inference. Since the proposed approach is based on an open-vocabulary detection model, it requires access to the target category labels during inference, which is shown in Figure 7. To obtain these labels, this team utilizes Qwen2.5-VL [1] to generate the textual descriptions of the target categories. The retrieved target labels from Qwen2.5-VL are used as textual input to guide the detection process. Then, the open-vocabulary detection model [11] is used to identify and classify objects in the test image based on the provided text-based labels." + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.334, + 0.907, + 0.477 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.545, + 0.487, + 0.874, + 0.502 + ], + "angle": 0, + "content": "Figure 8. Team FDUROILab_Lenovo: post processing." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.524, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Post-Process. Although existing open-vocabulary detectors possess strong open-set detection capabilities, their performance on the challenge test set remains suboptimal. 
Upon further analysis, this team found that while the detector can successfully identify most objects, its primary weakness lies in classification errors rather than detection failures. This indicates that the open-vocabulary detection model still struggles with accurate classification when adapting to objects in a new domain. To address this issue, the Qwen2.5-VL is introduced as an auxiliary classifier to refine the final predictions, which is shown in Figure 8. For each test image, this team prompts Qwen2.5-VL to describe the objects present in the scene and provide a list of candidate categories that are likely to appear in the image. After that, this team refines the output of the open-vocabulary detection model using one of two strategies: (1) Filtering. Remove objects that are classified incorrectly by the detector and are not listed by Qwen2.5-VL. (2) Reclassification: Assign all detected objects to one of the categories predicted by Qwen2.5-VL, ensuring consistency between the detected bounding boxes and the high-level scene understanding of the multimodal model. The choice between these two strategies depends on the specific test dataset. By leveraging Qwen2.5-VL as a post-processing step, this team effectively corrects classification errors and enhances the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.504, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.092, + 0.486, + 0.123 + ], + "angle": 0, + "content": "model's performance on unseen domains, leading to more accurate and reliable object detection results." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.132, + 0.251, + 0.146 + ], + "angle": 0, + "content": "4.4.2. Training Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.151, + 0.485, + 0.317 + ], + "angle": 0, + "content": "LLMDet [11] is adopted as the open-vocabulary detection model, with Swin-Large [46] serving as the visual backbone. 
The Qwen2.5-VL-72B [1] is introduced as the multimodal large language model (MLLM). Fine-tuning experiments are conducted on eight NVIDIA RTX 3090 GPUs, using a batch size of 8 and a learning rate of 1e-6. The number of training iterations varies across datasets and few-shot settings. For DeepFruits [60] and CarDD [76], the model is fine-tuned for 30, 50, and 100 batches under the 1-shot, 5-shot, and 10-shot settings. No fine-tuning is performed for CARPK [20]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.318, + 0.484, + 0.501 + ], + "angle": 0, + "content": "To enhance classification accuracy, dataset-specific post-processing strategies are applied. For DeepFruits, all detected objects are reclassified into one of the categories predicted by Qwen2.5-VL. In the case of CarDD, detected objects not belonging to the predefined categories are filtered out. As CARPK contains only a single object category, no additional classification is performed. However, further filtering is applied to remove overly large bounding boxes, which are likely to be incorrect, as the objects in this dataset are generally small. In all cases, Non-Maximum Suppression (NMS) is used to eliminate redundant or overlapping predictions." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.51, + 0.208, + 0.525 + ], + "angle": 0, + "content": "4.5. HUSTLab" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.534, + 0.262, + 0.549 + ], + "angle": 0, + "content": "4.5.1. Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.553, + 0.483, + 0.78 + ], + "angle": 0, + "content": "The HUSTLab explores the usage of Qwen2.5VL, MM-GroundingDINO, and LLMDet for the open-source CD-FSOD. 
The proposed method can be divided into two distinct phases: 1) Obtaining text descriptions from the training set using the Qwen2.5VL model; 2) Selecting a base model, such as Grounding DINO or LLMDet, and fine-tuning it with CopyPaste data augmentation, followed by Adversarial Weight Perturbation (AWP) training to derive the final model and obtain test results. We observe that models like Grounding DINO possess robust object detection capabilities, and fine-tuning them with few-shot data significantly enhances detection performance in specific domains. Moreover, for training sets with limited samples, utilizing text descriptions generated by large-scale vision-language models proves highly effective." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.781, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Text Description Generation with a Large VLM. In this phase, this team leverages Qwen2.5VL to generate detailed text descriptions for the limited samples in the training set, extracting text-modal information from the images [50]. Converting visual-modal information into text-modal information helps eliminate noise and condense semantic content. These detailed text descriptions are robust and will be fully utilized during the testing phase to enhance cross" + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.091, + 0.875, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.195, + 0.907, + 0.222 + ], + "angle": 0, + "content": "Figure 9. Team HUSTLab: overall framework of the proposed method." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.252, + 0.828, + 0.267 + ], + "angle": 0, + "content": "domain few-shot object detection performance." + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.287, + 0.903, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.525, + 0.375, + 0.889, + 0.39 + ], + "angle": 0, + "content": "Figure 10. 
Team HUSTLab: text description generation [50]." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.408, + 0.907, + 0.59 + ], + "angle": 0, + "content": "Training Phase. In this stage, this team first selects an appropriate base model—either Grounding DINO [43] or LLMDet [11]—based on its compatibility with the dataset. Using the zero-shot capabilities of the chosen base model, this team generates pseudo-labels, which are combined with ground-truth labels during training to regularize the model under few-shot conditions. To fine-tune the base model, this team uses CopyPaste [17] data augmentation and Adversarial Weight Perturbation (AWP) techniques [79]. This approach strengthens the model's generalization and robustness, enabling it to effectively handle cross-domain few-shot object detection tasks." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.6, + 0.673, + 0.615 + ], + "angle": 0, + "content": "4.5.2. Training Details" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.62, + 0.907, + 0.815 + ], + "angle": 0, + "content": "The model is fine-tuned on three datasets using the MM-GroundingDINO-Large implementation provided by MMDetection as the base object detection framework, with the aim of enhancing cross-domain detection capabilities. The performance largely depends on prompt design. Since part of the BERT-based text encoder is kept frozen during training, prompt quality plays a crucial role in boosting performance for certain object detection tasks. Prompts generated using Qwen2.5-VL are able to accurately describe the attribute features associated with abstract category names, thereby assisting the model in object localization and recognition. All experiments are conducted on \(4 \times\) NVIDIA RTX 3090 GPUs." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.828, + 0.632, + 0.844 + ], + "angle": 0, + "content": "4.6. TongjiLab" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.851, + 0.685, + 0.866 + ], + "angle": 0, + "content": "4.6.1. 
Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.906, + 0.902 + ], + "angle": 0, + "content": "The TongjiLab proposes ProtoDINO, an innovative approach for CD-FSOD under the open-set setting, building" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.485, + 0.408 + ], + "angle": 0, + "content": "upon GroundingDINO [43] as the baseline model. To improve the target classification performance of the baseline model, the CLIP model [22, 54] is employed to extract both local and global image features from a limited set of target domain samples. These features are subsequently used to construct support sets, which serve as the foundation for building local prototype and global prototype networks, respectively. In addition, a text prototype network is developed using the CLIP model. During the target detection phase, visual features are extracted from each image query using CLIP. The L2 distances between these visual features and the local prototypes, global prototypes, and text prototypes are then computed, with these distances serving as one of the metrics for target classification. Furthermore, a car-damage-detection model5, implemented as a vehicle appearance damage classification model based on the Vision Transformer (ViT), is incorporated. For the final target classification, matching probabilities derived from the GroundingDINO model, the car-damage-detection model, and the prototype networks [66] are weighted and combined to produce the overall classification metric." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.41, + 0.484, + 0.456 + ], + "angle": 0, + "content": "The framework of the proposed ProtoDINO is depicted in Fig. 11. Overall, ProtoDINO operates in two key stages: prototype construction and target detection." 
+ }, + { + "type": "image", + "bbox": [ + 0.093, + 0.469, + 0.482, + 0.652 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.663, + 0.483, + 0.69 + ], + "angle": 0, + "content": "Figure 11. Team TongjiLab: framework of the proposed ProtoDINO." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.71, + 0.484, + 0.876 + ], + "angle": 0, + "content": "Prototype Construction. During the prototype construction phase, this team crops few-shot learning images based on their annotations and generates visual embeddings as local feature prototypes \\( c_{local} \\) for these local patches using the CLIP model. For 5-shot and 10-shot settings, \\( c_{local} \\) is computed as the mean of all visual embeddings within the same category. Similarly, global feature prototypes \\( c_{global} \\) are derived by encoding entire images through CLIP and applying the same averaging strategy across categories. For each category text \\( t \\), this team builds the text prototype \\( c_{text} \\) using CLIP as the text encoder." + }, + { + "type": "equation", + "bbox": [ + 0.64, + 0.104, + 0.907, + 0.143 + ], + "angle": 0, + "content": "\\[\nc _ {l o c a l} ^ {(n)} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} F _ {c r o p} ^ {(i)} \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.64, + 0.149, + 0.907, + 0.189 + ], + "angle": 0, + "content": "\\[\nc _ {g l o b a l} ^ {(n)} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} F _ {i} ^ {(i)} \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.634, + 0.196, + 0.907, + 0.216 + ], + "angle": 0, + "content": "\\[\nc _ {t e x t} ^ {(n)} = f _ {\\text {c l i p - t e x t}} \\left(t ^ {(n)}\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.222, + 0.907, + 0.541 + ], + "angle": 0, + "content": "Target Detection. 
In the target detection stage, the input image and target category texts are processed by GroundingDINO to generate bounding boxes and initial classification probabilities. These bounding boxes are used to crop local regions from the image, which are then encoded by CLIP to obtain their visual features \\( F_{crop} \\). To classify these regions, this team computes the L2 distances between their representations and the precomputed prototypes as in Eq. 4. These distances are transformed into probability distributions via a softmax operation, yielding the prototype network's classification output as in Eq. 5. Simultaneously, the cropped regions are evaluated by a pre-trained car-damage-detection model (based on Vision Transformer) to generate additional classification probabilities. The final classification decision is derived by aggregating probabilities from GroundingDINO, the car-damage-detection model, and the prototype network through a weighted summation as in Eq. 6. This fusion approach effectively integrates geometric localization from GroundingDINO, cross-modal semantics from CLIP, domain-specific insights from the car-damage-detection model, and few-shot prototype matching." + }, + { + "type": "equation", + "bbox": [ + 0.617, + 0.551, + 0.907, + 0.592 + ], + "angle": 0, + "content": "\\[\nd (u, v) = \\sqrt {\\sum_ {n} \\left(u ^ {n} - v ^ {n}\\right) ^ {2}} \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.598, + 0.598, + 0.907, + 0.627 + ], + "angle": 0, + "content": "\\[\np r o b s _ {p r o t o} = - \\frac {1}{\\sigma} \\cdot e ^ {N o r m [ d (F, c) ]} \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.627, + 0.634, + 0.906, + 0.665 + ], + "angle": 0, + "content": "\\[\np r o b s = \\sum_ {i} w _ {i} \\cdot p r o b s _ {i} \\tag {6}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.671, + 0.673, + 0.686 + ], + "angle": 0, + "content": "4.6.2. 
Training Details" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.689, + 0.907, + 0.903 + ], + "angle": 0, + "content": "The implementation is carried out on a server running CentOS 7, equipped with a single RTX 6000 Ada GPU. For the CLIP model, the DFN5B-CLIP-ViT-H-14-378 implementation is selected due to its balance between performance and efficiency in processing visual and textual data. For the GroundingDINO model, the official implementation is used. Based on empirical observations, the threshold parameter \\(\\sigma\\) is set to 0.5, which provides optimal results across various scenarios. In GroundingDINO, the bounding box confidence threshold (BOX_THRESHOLD) is set to 0.3. For the final decision fusion, the weighting coefficients for integrating outputs from multiple modules are empirically assigned as: \\(w_{\\mathrm{local}} = 0.25\\) (local prototype network), \\(w_{\\mathrm{global}} = 0.15\\) (global prototype network), \\(w_{\\mathrm{text}} = 0.4\\) (text" + }, + { + "type": "page_footnote", + "bbox": [ + 0.108, + 0.887, + 0.476, + 0.901 + ], + "angle": 0, + "content": "5 https://huggingface.co/beingamit99/car_damage_detector/tree/main" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.51, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.092, + 0.486, + 0.123 + ], + "angle": 0, + "content": "prototype network), \\( w_{\\mathrm{dino}} = 0.1 \\) (GroundingDINO), and \\( w_{\\mathrm{car}} = 0.1 \\) (car-damage-detection model)." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.133, + 0.2, + 0.147 + ], + "angle": 0, + "content": "4.7. Manifold" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.156, + 0.262, + 0.171 + ], + "angle": 0, + "content": "4.7.1. 
Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.175, + 0.485, + 0.371 + ], + "angle": 0, + "content": "To address the challenge of few-shot object detection in cross-domain scenarios, the Manifold team proposes a novel approach based on the detection pipeline of a two-stage object detection algorithm. As illustrated in the Figure. 12, the proposed method first employs an open set object detection network, which is trained on public datasets, to detect objects in the query image. However, due to the domain gap between the pretraining datasets and the query datasets, the detection results cannot be directly trusted. Therefore, this team treats these results as region proposals that may contain objects of interest. Subsequently, this team combines the instance features from the support set for classification to obtain the final detection results." + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.386, + 0.485, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.534, + 0.46, + 0.548 + ], + "angle": 0, + "content": "Figure 12. Team Manifold: overall framework of GDPRE." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.568, + 0.483, + 0.749 + ], + "angle": 0, + "content": "GroundingDINO-based Region Proposals. The GroundingDINO is selected as the pre-trained open-set object detector. It can detect objects of interest in images using input text, and it was pre-trained on seven datasets: COCO, O365, GoldG, Cap4M, OpenImage, ODinW-35, and RefCOCO. This pre-training gives it good detection capabilities for most real-world objects. However, in cross-domain few-shot scenarios, its detection effectiveness is suboptimal. For example, avocados may be misclassified as oranges because of the higher frequency of oranges in the pre-training data. Despite this, GroundingDINO can still provide region proposals for potential objects of interest in query images." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.75, + 0.483, + 0.902 + ], + "angle": 0, + "content": "ResNet-based Feature Classification. After obtaining region proposals, this team classifies the objects within them using support set images. Given the limited samples and significant intra-class variations in image space, directly matching support instances with query candidates in this space yields poor results. ResNet pre-trained on ImageNet is used to extract image features, mapping instances to a more robust feature space. To address scale differences, this team resizes instances in both support and region proposal images to \(256 \times 256\) for feature extraction. Considering" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.907, + 0.228 + ], + "angle": 0, + "content": "some classes have large intra-class and small inter-class differences, this team treats each instance's feature vector in multi-shot settings as a separate support vector rather than averaging them by class. This team calculates the cosine similarity between candidate region instances and support set instance feature vectors, assigning the region proposal instance to the class of the most similar support instance. This yields the final detection results, and the cosine similarity serves as the prediction confidence." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.239, + 0.724, + 0.254 + ], + "angle": 0, + "content": "4.7.2. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.258, + 0.907, + 0.425 + ], + "angle": 0, + "content": "Given that both GroundingDINO and ResNet are pretrained on large-scale datasets, fine-tuning them under few-shot constraints—where the training classes do not overlap with the test classes—can be challenging. As a result, the pre-trained model weights are kept frozen. This approach requires minimal computational resources and can be executed on a laptop equipped with an RTX 4060 GPU. 
During inference, the category names from the test dataset are used as prompt inputs for GroundingDINO, and the BOX_THRESHOLD is set to 0.1 to obtain the final detection results." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.438, + 0.593, + 0.452 + ], + "angle": 0, + "content": "4.8.MXT" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.461, + 0.684, + 0.476 + ], + "angle": 0, + "content": "4.8.1. Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.481, + 0.907, + 0.633 + ], + "angle": 0, + "content": "This team proposes a Domain Adaptation Enhancement Module (DAEM) for Cross-Domain Few-Shot Object Detection (CD-FSOD), built as an extension to the CD-ViTO framework. While CD-ViTO provides a strong foundation for open-set cross-domain detection with DinoV2 ViT-L backbone, it still faces challenges with significant domain shifts. As illustrated in Fig 13, the DAEM integrates seamlessly with the DinoV2 ViT-L backbone and enhances domain adaptation through two complementary mechanisms: batch enhancement and feature alignment." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.647, + 0.907, + 0.813 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.541, + 0.823, + 0.878, + 0.837 + ], + "angle": 0, + "content": "Figure 13. Team DAEM: overall of the proposed model." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.856, + 0.907, + 0.903 + ], + "angle": 0, + "content": "Batch Enhancement Module. The batch enhancement module increases training diversity through controlled style transfer between domains. 
For both source and target do" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.483, + 0.123 + ], + "angle": 0, + "content": "main images, this team introduces cross-domain characteristics while preserving semantic content:" + }, + { + "type": "equation", + "bbox": [ + 0.178, + 0.139, + 0.483, + 0.171 + ], + "angle": 0, + "content": "\\[\n\\operatorname {i m g} _ {\\text {s t y l e d}} = \\sigma_ {t} \\cdot \\frac {\\operatorname {i m g} - \\mu_ {s}}{\\sigma_ {s}} + \\mu_ {t} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.181, + 0.485, + 0.227 + ], + "angle": 0, + "content": "where \\(\\mu_s, \\sigma_s\\) are source image statistics and \\(\\mu_t, \\sigma_t\\) are target domain statistics. The enhancement strength \\(\\alpha\\) gradually increases during training as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.202, + 0.242, + 0.483, + 0.274 + ], + "angle": 0, + "content": "\\[\n\\alpha = \\min (1. 0, \\frac {t}{T _ {\\text {w a r m u p}}}) \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.285, + 0.484, + 0.33 + ], + "angle": 0, + "content": "where \\( t \\) is the current iteration and \\( T_{warmup} \\) is set to 500. This gradual adaptation prevents disrupting the pre-trained DinoV2 ViT-L features early in training." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.332, + 0.484, + 0.392 + ], + "angle": 0, + "content": "Feature Alignment Module. The feature alignment module employs two complementary strategies to reduce domain gaps: Maximum Mean Discrepancy (MMD) and style-based adaptation." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.393, + 0.484, + 0.469 + ], + "angle": 0, + "content": "MMD Loss: The Maximum Mean Discrepancy is applied to reduce distribution differences between features from the source and target domains. 
MMD measures the distance between feature distributions in a reproducing kernel Hilbert space:" + }, + { + "type": "equation", + "bbox": [ + 0.105, + 0.497, + 0.483, + 0.56 + ], + "angle": 0, + "content": "\[\n\mathcal {L} _ {M M D} \left(\mathbf {X} _ {s}, \mathbf {X} _ {t}\right) = \left\| \frac {1}{n _ {s}} \sum_ {i = 1} ^ {n _ {s}} \phi \left(\mathbf {x} _ {s} ^ {i}\right) - \frac {1}{n _ {t}} \sum_ {j = 1} ^ {n _ {t}} \phi \left(\mathbf {x} _ {t} ^ {j}\right) \right\| _ {\mathcal {H}} ^ {2} \tag {9}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.562, + 0.484, + 0.638 + ], + "angle": 0, + "content": "This is implemented with multiple Gaussian kernels with bandwidths \(\sigma \in \{0.5, 1.0, 2.0, 5.0\}\) to capture similarities at different feature scales. This approach guides DinoV2 ViT-L to preserve its powerful representation abilities while adapting to target domains with minimal samples." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.64, + 0.484, + 0.698 + ], + "angle": 0, + "content": "Style Loss: Style-based adaptation addresses visual variations between domains that are unrelated to object semantics. For feature maps \(\mathbf{F}\), the channel-wise statistics are transformed as:" + }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.715, + 0.483, + 0.748 + ], + "angle": 0, + "content": "\[\n\hat {\mathbf {F}} = \sigma_ {t} \cdot \frac {\mathbf {F} - \mu_ {s}}{\sigma_ {s}} + \mu_ {t} \tag {10}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.758, + 0.484, + 0.818 + ], + "angle": 0, + "content": "where \(\mu_s, \sigma_s\) and \(\mu_t, \sigma_t\) are the channel statistics of source and target features. This approach helps DinoV2 ViT-L focus on domain-invariant object characteristics rather than domain-specific visual styles." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.82, + 0.484, + 0.865 + ], + "angle": 0, + "content": "The overall training objective combines the original CDViTO detection loss with the proposed domain adaptation components:" + }, + { + "type": "equation", + "bbox": [ + 0.148, + 0.886, + 0.483, + 0.903 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {\\text {d e t}} + \\lambda_ {m m d} \\mathcal {L} _ {M M D} + \\lambda_ {\\text {s t y l e}} \\mathcal {L} _ {\\text {s t y l e}} \\tag {11}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.092, + 0.673, + 0.107 + ], + "angle": 0, + "content": "4.8.2. Training Details" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.111, + 0.907, + 0.246 + ], + "angle": 0, + "content": "Following the pretrain–finetune–test pipeline established in the CD-FSOD benchmark, the pretrained DinoV2 ViT-L backbone from CD-ViTO is utilized. During fine-tuning, the backbone and Region Proposal Network (RPN) are selectively frozen, while the Domain-Adaptive Enhancement Modules (DAEM) and ROI Heads are optimized. This strategy preserves the general representational power of DinoV2 ViT-L while allowing domain-specific components to adapt effectively." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.248, + 0.907, + 0.353 + ], + "angle": 0, + "content": "Training is conducted on NVIDIA A800 GPUs, with hyperparameters determined through extensive experimentation: the MMD loss weight is set to \\(\\lambda_{mmd} = 0.16\\), the style loss weight to \\(\\lambda_{style} = 0.12\\), and the batch enhancement strength to \\(\\alpha_{max} = 0.8\\). Differential learning rates are applied, using a multiplier of 2.0 for the DAEM modules and bias terms, with a base learning rate of \\(1 \\times 10^{-4}\\)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.355, + 0.909, + 0.505 + ], + "angle": 0, + "content": "A warm-up phase of 500 iterations is introduced to gradually increase adaptation strength. 
This helps stabilize early-stage training and prevents disruption of the pretrained DinoV2 ViT-L features. Optimization is performed using stochastic gradient descent (SGD) with a momentum of 0.9 and a weight decay of \\(1 \\times 10^{-4}\\). The model reaches optimal cross-domain performance after approximately 50 epochs. The proposed approach maintains the efficiency of CD-ViTO while delivering substantial improvements in challenging cross-domain few-shot detection scenarios." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.52, + 0.857, + 0.538 + ], + "angle": 0, + "content": "5. Special Closed-Source Track Methods" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.546, + 0.602, + 0.56 + ], + "angle": 0, + "content": "5.1. X-Few" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.568, + 0.685, + 0.583 + ], + "angle": 0, + "content": "5.1.1. Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.588, + 0.906, + 0.692 + ], + "angle": 0, + "content": "To address the challenges of domain shift and category confusion arising from limited annotated data in CD-FSOD, the X-Few team proposes a novel domain adaptation strategy based on the Instance Feature Caching (IFC) mechanism. The framework of the proposed method is shown in Fig. 14, which is mainly built upon the CD-ViTO baseline. Code is made available \\(^{6}\\)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.694, + 0.907, + 0.844 + ], + "angle": 0, + "content": "Intuitively, the IFC module is proposed to construct a cache model that could store and dynamically retrieve discriminative instance-level features from the target domain, alleviating model degradation caused by cross-domain distribution discrepancy in the few-shot supervision situation. Specifically, the IFC mechanism facilitates knowledge transfer through prototype-based feature alignment and an attention-guided memory update strategy, enhancing the model's generalization capability in the data-scarce cross-domain scenario." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.846, + 0.906, + 0.877 + ], + "angle": 0, + "content": "Instance Feature Caching Construction. Given a support set \( S \) comprising \( N \) target categories, each consisting" + }, + { + "type": "page_footnote", + "bbox": [ + 0.53, + 0.887, + 0.89, + 0.901 + ], + "angle": 0, + "content": "\(^{6}\)https://github.com/johnmaijer/X-Few-_CD-FSOD" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.094, + 0.089, + 0.483, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.225, + 0.483, + 0.254 + ], + "angle": 0, + "content": "Figure 14. Team X-Few: illustration of the proposed Instance Feature Caching (IFC)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.276, + 0.484, + 0.368 + ], + "angle": 0, + "content": "of \(K\) annotated instances, denoted as \(I_{K}\) with their associated labels \(L_{N}\). For all \(N \times K\) support samples, the proposed method leverages a pre-trained DinoV2 ViT \(f_{CM}\) to obtain the instance-level features \(F_{train} \in \mathbf{R}^{NK \times C}\). 
Similarly, the ground-truth labels are also encoded into \(N\)-dimensional one-hot vectors \(L_{train} \in \mathbf{R}^{NK \times N}\):" + }, + { + "type": "equation", + "bbox": [ + 0.221, + 0.375, + 0.482, + 0.391 + ], + "angle": 0, + "content": "\[\nF _ {t r a i n} = \mathbf {f} _ {C M} \left(I _ {K}\right) \tag {12}\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.398, + 0.482, + 0.414 + ], + "angle": 0, + "content": "\[\nL _ {\text {t r a i n}} = \mathbf {O n e H o t} \left(L _ {N}\right) \tag {13}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.417, + 0.484, + 0.553 + ], + "angle": 0, + "content": "The feature extraction step is performed in an offline fashion to ensure persistent storage of high-quality feature representations for support set instances, thereby preserving discriminative semantic characteristics and spatial-aware contextual patterns in a memory-efficient manner. Then, these features and their corresponding label encodings are systematically cached to establish a comprehensive knowledge base that facilitates adaptive domain-aware detection while mitigating catastrophic forgetting." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.553, + 0.484, + 0.749 + ], + "angle": 0, + "content": "Instance Search. After constructing the instance feature caching, given a query image \(\mathcal{L}\), the proposed method first feeds \(\mathcal{L}\) into both the Region Proposal Network and the Vision Transformer encoder to generate candidate regions and extract their deep features, respectively. These region proposals are then combined with the corresponding instance-level features in \(\mathcal{L}\) to derive a query vector \(f_{test}\) for each candidate bounding box. 
Then, the proposed method achieves the most relevant instance feature lookup and finally calculates the adaptation representation \(A \times L_{train}\) for the target domain, where \(\mathbf{A} \in \mathbf{R}^{NK}\) is the affinity matrix between query vector and instance feature caching, being defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.756, + 0.482, + 0.774 + ], + "angle": 0, + "content": "\[\n\mathbf {A} = \exp (- \beta (1 - f _ {\text {t e s t}} F _ {\text {t r a i n}} ^ {T})) \tag {14}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.78, + 0.483, + 0.84 + ], + "angle": 0, + "content": "Ultimately, the domain adaptation representation is fed into the classification and regression branches of the original detection framework to calibrate prediction results from the open-set detector:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.84, + 0.484, + 0.902 + ], + "angle": 0, + "content": "1. Classification Enhancement: The similarity distribution between query features and cached features is leveraged to refine confidence estimates for the target domain categories through contrastive alignment." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.905, + 0.152 + ], + "angle": 0, + "content": "2. Localization Refinement: Retrieved instance localization priors are incorporated to constrain bounding box regression, thereby mitigating cross-domain localization biases caused by domain shifts." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.153, + 0.905, + 0.213 + ], + "angle": 0, + "content": "The above two strategies ensure that the detector adaptively aligns domain-invariant semantic representations while suppressing spurious correlations introduced by cross-domain discrepancies." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.222, + 0.673, + 0.236 + ], + "angle": 0, + "content": "5.1.2. 
Training Details" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.24, + 0.907, + 0.378 + ], + "angle": 0, + "content": "A single RTX A800 GPU is used for the experiments. The model is pre-trained on COCO and fine-tuned on novel support images. For the DeepFruit[60], Carpk[20], and CarDD[76], the specific hyper-parameters settings are shown in the Tab. 2. The tailored combination of learning rates and epoch schedules reflects a fine-grained tuning strategy to address domain heterogeneity across datasets, ensuring optimal trade-offs between generalization and task-specific optimization." + }, + { + "type": "table_caption", + "bbox": [ + 0.55, + 0.391, + 0.868, + 0.406 + ], + "angle": 0, + "content": "Table 2. Team X-Few: the hyper-parameters settings." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.417, + 0.922, + 0.482 + ], + "angle": 0, + "content": "
hyperparameter/shotDeepFruit [60]Carpk [20]CarDD [76]
151015101510
Batch size161616161616161616
Initial lr1e-31e-31e-31e-41e-41e-41e-31e-31e-3
Epoch40100200408010040100200
" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.508, + 0.585, + 0.523 + ], + "angle": 0, + "content": "5.2. MM" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.531, + 0.684, + 0.546 + ], + "angle": 0, + "content": "5.2.1. Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.55, + 0.907, + 0.67 + ], + "angle": 0, + "content": "The MM team proposes a novel DFE-ViT method for CD-FSOD, in the closed set setting, which only takes COCO as the source data and transfers the model to a novel target. As in Fig. 15, the proposed DFE-ViT method is built upon one open-set detector (DE-ViT) and finetuned using a few labeled instances from the target domain. New improvements include Instance Feature Enhancement, ROI Feature Enhancement." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.686, + 0.903, + 0.809 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.536, + 0.822, + 0.882, + 0.836 + ], + "angle": 0, + "content": "Figure 15. Team MM: overall framework of the DFE-ViT." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.856, + 0.906, + 0.903 + ], + "angle": 0, + "content": "Specifically, given \\( S \\) and \\( q \\) as input, DFE-ViT follows a similar pipeline as DE-ViT to obtain instance features \\( F_{ins} \\), region proposals \\( R_{q} \\), visual features \\( F_{q} \\), and ROI features" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.167 + ], + "angle": 0, + "content": "\\(F_{q_{roi}}\\). However, different from directly using \\(F_{ins}\\) to derive the class prototypes, an Instance Feature Enhancement module (IFE) and an ROI Feature Enhancement module (RFE) are proposed to enhance feature representation from both instance-level and ROI-level perspectives." 
+ }, + { + "type": "text", + "bbox": [ + 0.089, + 0.168, + 0.483, + 0.379 + ], + "angle": 0, + "content": "The IFE module adopts a residual CBAM structure to refine \\( F_{ins}^{ob} \\), enabling the network to adaptively emphasize informative channels and spatial regions. To guide this attention process more explicitly, a dedicated CBAM loss \\( \\mathcal{L}_{cbam} \\) is designed, which encourages the enhanced instance features to align with salient regions in both spatial and channel dimensions. Furthermore, to enhance semantic alignment, a class prototype enhancement mechanism is further incorporated where each object instance interacts with its corresponding class prototype via cross-attention, ensuring more discriminative and category-aware features. The output of IFE is optimized jointly with the standard detection losses, including the localization loss \\( \\mathcal{L}_{loc} \\), classification loss \\( \\mathcal{L}_{cls} \\), and the attention-guided loss \\( \\mathcal{L}_{cbam} \\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.379, + 0.483, + 0.514 + ], + "angle": 0, + "content": "For ROI features, this team introduces RFE based on a Variational Autoencoder (VAE). Each ROI feature \\( F_{q_{roi}} \\) is encoded into a latent distribution and then reconstructed, which enables learning a more robust and expressive representation. A reconstruction loss \\( \\mathcal{L}_{vae} \\) is employed to ensure fidelity and consistency in the learned latent space. This ROI-level enhancement complements the instance-level refinement, offering a more diversified and generalized feature representation." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.515, + 0.483, + 0.56 + ], + "angle": 0, + "content": "The top modules including the detection head \\( M_{DET} \\) and the classification head \\( M_{CLS} \\) are fine-tuned using the combined objective:" + }, + { + "type": "equation", + "bbox": [ + 0.131, + 0.571, + 0.482, + 0.587 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {l o c} + \\mathcal {L} _ {c l s} + \\alpha * \\mathcal {L} _ {c b a m} + \\beta * \\mathcal {L} _ {v a e}. \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.598, + 0.483, + 0.809 + ], + "angle": 0, + "content": "Instance Feature Enhancement. The IFE module aims to refine instance features by integrating spatial/channel attention and semantic guidance. Given input instance features \\( F_{ins} \\in \\mathbb{R}^{B \\times C \\times H \\times W} \\), it first applies a residual CBAM to obtain spatially and channel-refined features \\( F_{cbam} \\). Then, class prototypes \\( P \\in \\mathbb{R}^{N \\times C} \\) are used to semantically enhance the instance features via a cross-attention mechanism. Specifically, query and key projections are computed as \\( Q = W_qF_{ins} \\) and \\( K = W_kP \\), followed by attention: \\( A = \\text{softmax}(QK^\\top / \\sqrt{d}) \\). The attended prototype features are added with a learnable weight \\( \\gamma \\), yielding \\( F_{proto} \\). The final enhanced features are computed as \\( F_{enh} = F_{cbam} + F_{proto} \\), which are more discriminative for downstream detection." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.811, + 0.483, + 0.902 + ], + "angle": 0, + "content": "ROI Feature Enhancement. The RFE module is based on a Variational Autoencoder and class prototype computation. As shown in Fig. 15, the orange modules represent the newly proposed contributions: using VAE to model ROI features and enriching them with class prototypes. 
Given input ROI features \\( x \\in \\mathbb{R}^{N \\times C \\times k \\times k} \\), VAE" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.26 + ], + "angle": 0, + "content": "encodes \\( x \\) into latent mean \\( \\mu \\in \\mathbb{R}^{N \\times d} \\) and log-variance \\( \\log \\sigma^2 \\in \\mathbb{R}^{N \\times d} \\) through linear layers. Latent variables are sampled as \\( z = \\mu + \\sigma \\odot \\epsilon \\) using the reparameterization trick. Then, \\( z \\) is decoded to reconstruct the ROI features \\( \\hat{x} = \\mathrm{Decoder}(z) \\). The reconstruction loss is computed as \\( L_{\\text{recon}} = \\frac{1}{N} \\sum_{i=1}^{N} \\| \\hat{x}_i - x_i \\|^2 \\), and the KL divergence loss regularizes the latent distribution: \\( L_{KL} = -\\frac{1}{2} \\sum_{i=1}^{N} (1 + \\log \\sigma_i^2 - \\mu_i^2 - \\sigma_i^2) \\). The total VAE loss is \\( L_{vae} = L_{\\text{recon}} + L_{KL} \\). Finally, class prototypes are computed to further enhance feature representation across categories." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.269, + 0.673, + 0.284 + ], + "angle": 0, + "content": "5.2.2. Training Details" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.288, + 0.907, + 0.514 + ], + "angle": 0, + "content": "The model is trained in the \"pretrain, finetune, and test\" pipeline. Specifically, the base DE-ViT model pretrained on COCO is taken, then the \\( M_{DET} \\), \\( M_{CLS} \\), \\( IFE \\) and \\( RFE \\) are tuned on novel support images \\( S \\) using the loss as in Eq. 15. The hyperparameter \\( \\alpha \\) temperature for \\( \\mathcal{L}_{cbam} \\), \\( \\beta \\) temperature for \\( \\mathcal{L}_{vae} \\) are set as 0.3, 0.4 for all the target datasets. While the value \\( N_{dom} \\) means the number of virtual domains depending on the number of target classes \\( N \\), specifically, \\( N_{dom} = 2 * N \\). The hyperparameter Top-K (\\( K \\)) in DE-ViT is set to 5. 
For datasets with the number of classes \\( N \\) less than 5, \\( K \\) is set to \\( N \\). The trainable parameters are finetuned on 1-shot around 80 epochs, and on 5/10-shot around 50 epochs. The SGD with a learning rate of 0.002 is used as the optimizer. Experiments are performed on four A6000 GPUs." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.526, + 0.585, + 0.54 + ], + "angle": 0, + "content": "5.3. FSV" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.549, + 0.684, + 0.563 + ], + "angle": 0, + "content": "5.3.1. Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.568, + 0.905, + 0.825 + ], + "angle": 0, + "content": "The FSV team proposes an enhancement to the prototype-based detection for the cross-domain few-shot object detection (CD-FSOD) challenge under the closed-source setting, based on the CD-ViTO baseline model, as shown in Figure 16. Based on observations of the existing approach, this team found that CD-FSOD faces three key challenges. First, few-shot learning inherently suffers from limited example diversity. Second, conventional binary masking treats all spatial locations within an object region equally, which fails to prioritize more discriminative central regions over potentially noisy boundary areas. Third, standard cosine similarity calculations between query features and prototypes lack proper calibration, resulting in suboptimal separability across domain shifts. To solve these three challenges, this team explores three techniques: (1) Support Set Data Augmentation, (2) Soft Mask-Based Prototype Aggregation, and (3) Temperature-Scaled Similarity Calibration." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.826, + 0.905, + 0.902 + ], + "angle": 0, + "content": "Support Set Data Augmentation. For the support images, the proposed approach constructs a stochastic augmentation function to increase the diversity of the samples. 
DINOv2 [48] is used as the feature extraction backbone for the augmented data, for its robust self-supervised learning capa" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.109, + 0.093, + 0.473, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.116, + 0.23, + 0.458, + 0.246 + ], + "angle": 0, + "content": "Figure 16. Team FSV: overview of the proposed method." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.257, + 0.483, + 0.332 + ], + "angle": 0, + "content": "bilities and effective cross-domain transfer. The augmentation pipeline consists of a composition of transformations including Random Saturation, Random Contrast, Random Brightness, Random Flip, Random Rotation, Random Crop, Random Erasing, and Resize Shortest Edge." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.333, + 0.484, + 0.501 + ], + "angle": 0, + "content": "Soft Mask-Based Prototype Aggregation. To prioritize more discriminative central regions over potentially noisy boundary areas, the conventional binary masks are replaced by Gaussian soft masks to create soft spatial attention. Let \\( F_{ins} = \\{F_{ins}^{ob}, F_{ins}^{bg}\\} \\) denote the extracted instance features and \\( M \\) denote the binary mask of an instance. The soft mask could be defined \\( \\tilde{M} \\) as: \\( \\tilde{M} = \\frac{G_{\\sigma}(M)}{\\max G_{\\sigma}(M)} \\), where \\( G_{\\sigma} \\) is the Gaussian filter with standard deviation parameter \\( \\sigma \\). The extracted instance features for foreground objects \\( F_{ins}^{ob} \\) are then weighted by the soft mask \\( \\tilde{M} \\), used as the initialization for learnable instance features." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.502, + 0.484, + 0.639 + ], + "angle": 0, + "content": "Temperature-Scaled Similarity Calibration. 
Finally, to calibrate image features to other domains, the proposed approach takes temperature scaling to make the final prototypes better match those in the new domain, which is a simple yet effective strategy to improve the discriminability of similarity scores. Let \\( F_{q_{roi}} \\) denote the ROI features extracted from a query image using DINOv2. \\( F_{pro} \\) denotes the prototype vector. The temperature scaling is applied during the cosine similarity computation as" + }, + { + "type": "equation", + "bbox": [ + 0.196, + 0.648, + 0.483, + 0.686 + ], + "angle": 0, + "content": "\\[\ns _ {\\tau} = \\frac {F _ {q _ {r o i}} ^ {\\top} F _ {p r o}}{\\tau \\cdot \\| F _ {q _ {r o i}} \\| \\cdot \\| F _ {p r o} \\|}, \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.694, + 0.484, + 0.754 + ], + "angle": 0, + "content": "where \\(\\tau\\) is a temperature parameter that controls the sharpness of the similarity distribution. By tuning the temperature parameter, the entropy of the output distribution can be better modulated." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.761, + 0.303, + 0.777 + ], + "angle": 0, + "content": "5.3.2. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.78, + 0.484, + 0.901 + ], + "angle": 0, + "content": "The training procedure utilizes only the provided few-shot datasets (1-shot, 5-shot, and 10-shot variants), without incorporating additional external data. The trainable parameters are finetuned for each testing dataset around 100 epochs. The training batch size is 16, with a base learning rate of 0.002. The parameter \\(\\sigma\\) in Soft Mask-Based Prototype Aggregation is set to 2.0. The parameter \\(\\tau\\) in Temperature-Scaled Similarity Calibration is set to 0.07." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.899, + 0.107 + ], + "angle": 0, + "content": "Experiments are performed on four NVIDIA A100 GPUs." 
+ }, + { + "type": "title", + "bbox": [ + 0.514, + 0.116, + 0.583, + 0.131 + ], + "angle": 0, + "content": "5.4. IPC" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.139, + 0.685, + 0.154 + ], + "angle": 0, + "content": "5.4.1. Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.157, + 0.906, + 0.264 + ], + "angle": 0, + "content": "The IPC team utilizes CD-ViTO as the baseline, which is an improved version of the DE-ViT method, designed to enhance the cross-domain detection capability. To further mitigate performance degradation caused by cross-domain discrepancies and a very small number of test domain reference examples, this team was inspired by [59] to introduce a test-time adaptation algorithm during the inference phase." + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.283, + 0.9, + 0.481 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.496, + 0.907, + 0.607 + ], + "angle": 0, + "content": "Figure 17. Team IPC: overview of the proposed approach. The upper section represents the baseline CD-ViTO fine-tuning phase; the lower section represents the test-time adaptation (TTA) process. The TTA procedure operates without access to the original training data, updating the fine-tuned detector on a single testing image before making a prediction. Crucially, only the mask prediction module in CD-ViTO undergoes gradient updates during TTA iterations." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.626, + 0.905, + 0.695 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.708, + 0.906, + 0.763 + ], + "angle": 0, + "content": "Figure 18. Team IPC: by iteratively retaining proposals (yellow boxes \\(\\square\\)) with high confidence scores as pseudo labels (red boxes \\(\\square\\)), the model can effectively filter out most invalid detection boxes." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.907, + 0.903 + ], + "angle": 0, + "content": "To be specific, the proposed approach employs an iterative process as shown in Fig 17. During each iteration \\( t \\) (where \\( t \\in \\{1, \\dots, T\\} \\)), the existing detector \\( \\theta_{t-1} \\) generates predictions \\( D_t = \\{(b_{t,i}, p_{t,i}) : \\forall i\\} \\) for image \\( I \\), with \\( b_{t,i} \\) representing the \\( i^{th} \\) object's bounding box and \\( p_{t,i} \\in [0,1]^K \\) denoting the class probability distribution across \\( K \\) categories. The detection confidence \\( c_{t,i} \\in [0,1] \\) is determined by the highest probability in \\( p_{t,i} \\), while the" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.198 + ], + "angle": 0, + "content": "corresponding class index gives the predicted object category \\( y_{t,i} \\in \\{1, \\dots, K\\} \\). Confident detections are then selected as pseudo-labels as illustrated in Fig 18: \\( P_t = \\{(b_{t,i}, y_{t,i}) : c_{t,i} > \\lambda_{conf}\\} \\), with \\( \\lambda_{conf} \\) serving as the confidence cutoff. The detector is subsequently refined through gradient descent on these pseudo-labels, yielding an improved model \\( \\theta_t \\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.198, + 0.482, + 0.319 + ], + "angle": 0, + "content": "For the initial iteration \\((t = 1)\\), the detector \\(\\theta_{t - 1}\\) is initialized as \\(\\theta_0\\), which was pre-trained on source domain data. Upon completion of the final iteration \\((t = T)\\), the optimized model \\(\\theta_T\\) produces the final predictions for \\(I\\). Notably, this self-training paradigm maintains the original network architecture and operates without requiring access to source data or any other pretrained foundation models during adaptation." 
+ }, + { + "type": "title", + "bbox": [ + 0.091, + 0.328, + 0.25, + 0.342 + ], + "angle": 0, + "content": "5.4.2. Training Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.347, + 0.483, + 0.483 + ], + "angle": 0, + "content": "A single NVIDIA A6000 GPU is used for all experiments. The proposed method extends the CD-ViTO baseline through a test-time adaptation pipeline, initialized with k-shot instance fine-tuning on novel support datasets. During inference, the proposed method processes each test image using momentum SGD (\\(\\beta = 0.9\\), \\(\\alpha = 0.001\\)) to exclusively update the mask prediction module through 5 iterations. For all experimental datasets, the cut-off confidence threshold \\(\\lambda_{conf}\\) is empirically set to 0.6." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.493, + 0.163, + 0.508 + ], + "angle": 0, + "content": "5.5.LJY" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.515, + 0.262, + 0.531 + ], + "angle": 0, + "content": "5.5.1. Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.535, + 0.483, + 0.655 + ], + "angle": 0, + "content": "As shown in Fig. 19, the LJY team proposes similarity calibrated prototype refinement network, which utilizes query-aware guidelines to generate prototypes. The network contains a pretrained DINOv2 ViT, a region proposal network, an ROI align module, a detection head, and a one-vs-rest classification head. During the finetuning stage, the parameters of DINOv2 ViT are frozen. Only the parameters of the detection head and the classification head are finetuned." + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.667, + 0.482, + 0.837 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.134, + 0.846, + 0.438, + 0.86 + ], + "angle": 0, + "content": "Figure 19. Team LJY: overall framework of SCPR." 
+ }, + { + "type": "text", + "bbox": [ + 0.091, + 0.87, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Given a query image \\( \\pmb{q} \\in \\mathbb{R}^{H \\times W \\times C} \\) and a set of support images \\( S \\), where \\( H, W \\) and \\( C \\) stand for the num-" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.907, + 0.653 + ], + "angle": 0, + "content": "ber of height, width and channels, respectively, the DINOv2 ViT backbone is used for obtaining query patches \\( \\pmb{F}_{q} \\in \\mathbb{R}^{d} \\) and support patches \\( \\pmb{F}_{s} \\). Then, two linear layers are applied to project the query patches \\( \\pmb{F}_{q} \\) to \\( \\pmb{Q} \\) and \\( \\pmb{K}_{1} \\) and project the support patches \\( \\pmb{F}_{s} \\) to \\( \\pmb{K}_{2} \\). The query patches \\( \\pmb{F}_{q} \\) and the support patches \\( \\pmb{F}_{s} \\) are then concatenated to obtain \\( \\pmb{F}_{cat} = \\text{Concat}(\\pmb{F}_{q}, \\pmb{F}_{s}) \\). The concatenated patches \\( \\pmb{F}_{cat} \\) are projected to obtain \\( \\pmb{V} \\). To align the query patches and the support patches, the proposed method conducts scaled dot product on query patches \\( \\pmb{F}_{q} \\) and itself to obtain self attention score \\( A_{self} = \\frac{\\pmb{Q}\\pmb{K}_{1}^{\\top}}{\\sqrt{d}} \\). Meanwhile, cross-attention score is computed using cosine similarity to ensure scale invariance \\( A_{cross} = \\frac{\\pmb{Q}\\pmb{K}_{2}^{\\top}}{\\|\\pmb{Q}\\|_{2}\\|\\pmb{K}_{2}\\|_{2} + \\epsilon} \\) where \\( \\epsilon \\) is a small constant to avoid division by zero. The combined attention score is obtained by concatenating both and then be normalized by the softmax operation \\( A = \\text{Softmax}(\\text{Concat}(\\pmb{A}_{self}, \\pmb{A}_{cross})) \\). The refined query representation is obtained by applying attention weights to the value matrix \\( \\hat{\\pmb{F}}_{q} = \\pmb{F}_{q} + \\pmb{A}\\pmb{V} \\). 
With the aligned query patches, the proposed method then generates prototypes with query-perceptual information. To further calibrate support features, their cosine similarity with the refined query is computed: \\( Sim = \\text{Softmax}\\left(\\frac{\\pmb{F}_{s}\\pmb{F}_{q}^{\\top}}{\\|\\pmb{F}_{s}\\|_{2}\\|\\pmb{F}_{q}\\|_{2} + \\epsilon}\\right) \\). This similarity is used to re-weight the support representations: \\( \\hat{\\pmb{F}}_{s} = \\pmb{F}_{s} + Sim*\\hat{\\pmb{F}}_{q} \\). A learnable weighting function is applied via a sigmoid transformation: \\( W = Sigmoid(FC(\\hat{\\pmb{F}}_{s})) \\). Ensuring adaptive feature scaling: \\( \\hat{\\pmb{F}}_{s} = W\\cdot \\hat{\\pmb{F}}_{s} \\). The updated support features are then averaged across the K-shot dimension to derive refined prototypes: \\( P = \\frac{1}{K}\\sum_{i=1}^{K}\\hat{\\pmb{F}}_{s} \\). Finally, the query-aware prototype refinement is performed using a weighted combination of the refined prototypes and the original prototypes: \\( \\hat{\\pmb{P}} = \\alpha\\cdot\\pmb{P} + (1-\\alpha)\\cdot\\frac{1}{K}\\sum_{i=1}^{K}\\pmb{F}_{s} \\). This final prototype representation retains both source-domain knowledge and query-specific adaptability, effectively enhancing cross-domain few-shot detection performance." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.66, + 0.673, + 0.675 + ], + "angle": 0, + "content": "5.5.2. Training Details" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.679, + 0.906, + 0.754 + ], + "angle": 0, + "content": "The proposed modules are fine-tuned on novel support images, with the base DE-ViT pretrained on COCO taken as initialization. The SGD with a learning rate of 0.002 is used as the optimizer. All experiments are conducted on two RTX3090 GPUs. The mAPs for 1/5/10 shots are reported." 
+ }, + { + "type": "title", + "bbox": [ + 0.513, + 0.768, + 0.673, + 0.785 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.792, + 0.906, + 0.9 + ], + "angle": 0, + "content": "INSAIT, Sofia University \"St. Kliment Ohridski\". Partially funded by the Ministry of Education and Science of Bulgaria's support for INSAIT as part of the Bulgarian National Roadmap for Research Infrastructure. This work was partially supported by the Humboldt Foundation. We thank the NTIRE 2025 sponsors: ByteDance, Meituan, Kuaishou, and University of Wurzburg (Computer Vision Lab)." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.09, + 0.304, + 0.106 + ], + "angle": 0, + "content": "A. Teams and affiliations" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.117, + 0.236, + 0.132 + ], + "angle": 0, + "content": "NTIRE 2025 team" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.141, + 0.483, + 0.172 + ], + "angle": 0, + "content": "Title: NTIRE 2025 Challenge on Cross-Domain Few-Shot Object Detection: Methods and Results." 
+ }, + { + "type": "title", + "bbox": [ + 0.092, + 0.172, + 0.165, + 0.184 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.187, + 0.32, + 0.202 + ], + "angle": 0, + "content": "Yuqian Fu1 (yuqian.fu@insait.ai)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.203, + 0.369, + 0.217 + ], + "angle": 0, + "content": "Xingyu Qiu² (xyqiu24@m.fudan.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.217, + 0.294, + 0.231 + ], + "angle": 0, + "content": "Bin Ren\\(^{3,4}\\) (bin.ren@unitn.it)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.232, + 0.353, + 0.247 + ], + "angle": 0, + "content": "Yanwei \\(\\mathrm{Fu}^2\\) (yanweifu@fudan.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.248, + 0.422, + 0.262 + ], + "angle": 0, + "content": "Radu Timofte\\(^{5}\\) (radu.timofte@uni-wuerzburg.de)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.263, + 0.332, + 0.277 + ], + "angle": 0, + "content": "Nicu Sebe4 (niculae.sebe@unitn.it)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.278, + 0.401, + 0.292 + ], + "angle": 0, + "content": "Ming-Hsuan Yang\\(^{6}\\) (mhyang@ucmerced.edu)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.293, + 0.352, + 0.307 + ], + "angle": 0, + "content": "Luc Van Gool1 (luc.vangool@insait.ai)" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.308, + 0.175, + 0.322 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.323, + 0.479, + 0.337 + ], + "angle": 0, + "content": "1 INSAIT, Sofia University St. 
Kliment Ohridski, Bulgaria" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.338, + 0.266, + 0.352 + ], + "angle": 0, + "content": "\\(^{2}\\) Fudan University, China" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.353, + 0.262, + 0.367 + ], + "angle": 0, + "content": "3 University of Pisa, Italy" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.368, + 0.278, + 0.383 + ], + "angle": 0, + "content": "4 University of Trento, Italy" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.383, + 0.48, + 0.398 + ], + "angle": 0, + "content": "5 Computer Vision Lab, University of Würzburg, Germany" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.398, + 0.429, + 0.412 + ], + "angle": 0, + "content": "6 University of California at Merced, United States" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.323, + 0.48, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.444, + 0.174, + 0.458 + ], + "angle": 0, + "content": "MoveFree" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.468, + 0.483, + 0.499 + ], + "angle": 0, + "content": "Title: Marrying MoE-powered Grounding DINO with Self-training for Cross-domain Few-shot Object Detection" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.5, + 0.164, + 0.512 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.514, + 0.382, + 0.529 + ], + "angle": 0, + "content": "Kaijin Zhang\\(^{1}\\) (zhang.kaijin1@zte.com.cn)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.53, + 0.41, + 0.544 + ], + "angle": 0, + "content": "Qingpeng Nong1 (nong.qingpeng@zte.com.cn)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.545, + 0.409, + 0.559 + ], + "angle": 0, + "content": "Xiugang Dong\\(^{1}\\) (dong.xiugang20@zte.com.cn)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.56, + 0.336, + 0.574 + ], + "angle": 0, + "content": "Hong Gao\\(^{1}\\) (gao.hong@zte.com.cn)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.575, + 
0.43, + 0.59 + ], + "angle": 0, + "content": "Xiangsheng Zhou1 (zhou.xiangsheng@zte.com.cn)" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.59, + 0.175, + 0.604 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.605, + 0.299, + 0.618 + ], + "angle": 0, + "content": "1 Central R & D Institute, ZTE" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.65, + 0.201, + 0.665 + ], + "angle": 0, + "content": "AI4EarthLab" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.675, + 0.483, + 0.719 + ], + "angle": 0, + "content": "Title: Enhance Then Search: An Augmentation-Search Strategy with Foundation Models for Cross-Domain Few-Shot Object Detection" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.721, + 0.163, + 0.734 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.736, + 0.421, + 0.75 + ], + "angle": 0, + "content": "Jiancheng Pan1 (jiancheng.pan_plus@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.751, + 0.406, + 0.765 + ], + "angle": 0, + "content": "Yanxing Liu\\(^{2}\\) (liuyanxing21@mails.ucas.ac.cn)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.766, + 0.315, + 0.78 + ], + "angle": 0, + "content": "Xiao He\\(^{3}\\) (xiaohewhu@163.com)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.781, + 0.399, + 0.796 + ], + "angle": 0, + "content": "Jiahao Li1 (lijiahao23@mails.tsinghua.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.797, + 0.375, + 0.81 + ], + "angle": 0, + "content": "Yuze Sun\\(^{1}\\) (syz23@mails.tsinghua.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.811, + 0.376, + 0.826 + ], + "angle": 0, + "content": "Xiaomeng Huang\\(^{1}\\) (hxm@tsinghua.edu.cn)" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.827, + 0.175, + 0.84 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.841, + 0.24, + 0.856 + ], + "angle": 0, + "content": "\\(^{1}\\) 
Tsinghua University" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.857, + 0.394, + 0.871 + ], + "angle": 0, + "content": "\\(^{2}\\) University of Chinese Academy of Sciences" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.871, + 0.226, + 0.886 + ], + "angle": 0, + "content": "\\(^{3}\\) Wuhan University" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.841, + 0.394, + 0.886 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.091, + 0.574, + 0.105 + ], + "angle": 0, + "content": "IDCFS" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.113, + 0.905, + 0.143 + ], + "angle": 0, + "content": "Title: Pseudo-Label Driven Vision-Language Grounding for Cross-Domain Few-Shot Object Detection" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.144, + 0.586, + 0.157 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.158, + 0.815, + 0.173 + ], + "angle": 0, + "content": "Zhenyu Zhang\\(^{1}\\) (m202273680@hust.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.174, + 0.726, + 0.188 + ], + "angle": 0, + "content": "Ran Ma1 (ranma@hust.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.189, + 0.765, + 0.204 + ], + "angle": 0, + "content": "Yuhan Liu1 (yuhan.liu@hust.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.205, + 0.786, + 0.219 + ], + "angle": 0, + "content": "Zijian Zhuang\\(^{1}\\) (zhuangzj@hust.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.219, + 0.737, + 0.234 + ], + "angle": 0, + "content": "Shuai Yi\\(^{1}\\) (yishuai@hust.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.235, + 0.77, + 0.249 + ], + "angle": 0, + "content": "Yixiong Zou1 (yixiongz@hust.edu.cn)" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.25, + 0.598, + 0.263 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.264, + 0.905, + 0.295 + ], + "angle": 0, + "content": "1 School of 
Computer Science and Technology, Huazhong University of Science and Technology" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.318, + 0.684, + 0.333 + ], + "angle": 0, + "content": "FDUROILab_Lenovo" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.34, + 0.905, + 0.37 + ], + "angle": 0, + "content": "Title: Efficient Tuning and MLLM-Based Post Prcessing for CDFSOD" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.371, + 0.586, + 0.384 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.386, + 0.806, + 0.401 + ], + "angle": 0, + "content": "Lingyi Hong1 (lyhong22@m.fudan.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.402, + 0.819, + 0.416 + ], + "angle": 0, + "content": "Mingxi Cheng1(mxchen24@m.fudan.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.417, + 0.721, + 0.431 + ], + "angle": 0, + "content": "Runze Li\\(^{2}\\)(lirz7@lenovo.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.432, + 0.804, + 0.446 + ], + "angle": 0, + "content": "Xingdong Sheng\\(^{2}\\)(shengxd1@lenovo.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.447, + 0.816, + 0.462 + ], + "angle": 0, + "content": "Wenqiang Zhang\\(^{1,3}\\) (wqzhang@fudan.edu.cn)" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.463, + 0.598, + 0.476 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.477, + 0.905, + 0.506 + ], + "angle": 0, + "content": "\\(^{1}\\) Shanghai Key Lab of Intelligent Information Processing, School of Computer Science, Fudan University" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.507, + 0.643, + 0.52 + ], + "angle": 0, + "content": "2 Lenovo Research" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.522, + 0.905, + 0.567 + ], + "angle": 0, + "content": "3 Engineering Research Center of AI & Robotics, Ministry of Education, Academy for Engineering & Technology, Fudan University" + }, + { + "type": "list", + "bbox": [ + 0.514, 
+ 0.477, + 0.905, + 0.567 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.591, + 0.6, + 0.606 + ], + "angle": 0, + "content": "HUSTLab" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.613, + 0.905, + 0.643 + ], + "angle": 0, + "content": "Title: Prompt and Finetune Grounding DINO for Cross-Domain Few-shot Object Detection" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.644, + 0.586, + 0.657 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.659, + 0.805, + 0.674 + ], + "angle": 0, + "content": "Weisen Chen\\(^{1}\\) (U202115027@hust.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.675, + 0.771, + 0.689 + ], + "angle": 0, + "content": "Yongxin Yan\\(^{1}\\) (2585856499@qq.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.689, + 0.772, + 0.704 + ], + "angle": 0, + "content": "Xinguo Chen\\(^{2}\\) (327715@whut.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.705, + 0.794, + 0.719 + ], + "angle": 0, + "content": "Yuanjie Shao\\(^{1}\\) (shaoyuanjie@hust.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.72, + 0.813, + 0.734 + ], + "angle": 0, + "content": "Zhengrong Zuo\\(^{1}\\) (zhrzuo@main.hust.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.735, + 0.737, + 0.75 + ], + "angle": 0, + "content": "Nong Sang\\(^{1}\\) (nsang@hust.edu.cn)" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.765, + 0.598, + 0.779 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.78, + 0.905, + 0.809 + ], + "angle": 0, + "content": "1 School of Artificial Intelligence and Automation, Huazhong University of Science and Technology" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.81, + 0.905, + 0.84 + ], + "angle": 0, + "content": "\\(^{2}\\) School of Information Engineering, Wuhan University of Technology" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.78, + 0.905, + 0.84 + ], 
+ "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.849, + 0.6, + 0.865 + ], + "angle": 0, + "content": "TongjiLab" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.871, + 0.905, + 0.902 + ], + "angle": 0, + "content": "Title: ProtoDINO: Cross-Domain Few-Shot Object Detection via GroundingDINO and CLIP-Based Prototypes" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.092, + 0.165, + 0.105 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.107, + 0.32, + 0.121 + ], + "angle": 0, + "content": "Hao \\(\\mathbf{W}\\mathbf{u}^{1}\\) (haowu@tongji.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.122, + 0.18, + 0.135 + ], + "angle": 0, + "content": "Haoran Sun" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.137, + 0.175, + 0.151 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.152, + 0.221, + 0.167 + ], + "angle": 0, + "content": "\\(^{1}\\) Tongji University" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.192, + 0.167, + 0.206 + ], + "angle": 0, + "content": "Manifold" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.214, + 0.483, + 0.245 + ], + "angle": 0, + "content": "Title: CDFSOD Challenge: Using Grounding-DINO Proposals and ResNet Embeddings" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.246, + 0.163, + 0.258 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.26, + 0.348, + 0.274 + ], + "angle": 0, + "content": "Shuming Hu1 (hsm123@nudt.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.275, + 0.179, + 0.29 + ], + "angle": 0, + "content": "Yan Zhang1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.291, + 0.196, + 0.305 + ], + "angle": 0, + "content": "Zhiguang Shi1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 
0.306, + 0.172, + 0.32 + ], + "angle": 0, + "content": "Yu Zhang1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.321, + 0.18, + 0.335 + ], + "angle": 0, + "content": "Chao Chen1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.336, + 0.165, + 0.35 + ], + "angle": 0, + "content": "Tao Wang" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.352, + 0.175, + 0.365 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.366, + 0.392, + 0.381 + ], + "angle": 0, + "content": "\\(^{1}\\) National University of Defense Technology" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.406, + 0.138, + 0.42 + ], + "angle": 0, + "content": "MXT" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.429, + 0.483, + 0.459 + ], + "angle": 0, + "content": "Title: Domain Adaptation Enhancement Module (DAEM) for Cross-Domain Few-Shot Object Detection" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.46, + 0.163, + 0.472 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.474, + 0.334, + 0.488 + ], + "angle": 0, + "content": "Da Feng\\(^{1}\\) (072108208@fzu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.489, + 0.34, + 0.504 + ], + "angle": 0, + "content": "Linhai Zhuo\\(^{1}\\) (534537916@qq.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.505, + 0.177, + 0.519 + ], + "angle": 0, + "content": "Ziming Lin" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.52, + 0.175, + 0.533 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.535, + 0.228, + 0.55 + ], + "angle": 0, + "content": "\\(^{1}\\) Fuzhou University" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.575, + 0.147, + 0.588 + ], + "angle": 0, + "content": "X-Few" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.597, + 0.483, + 0.626 + ], + "angle": 0, + "content": "Title: IFC: Instance Feature Caching for Cross-Domain Few-Shot Object Detection" + }, 
+ { + "type": "text", + "bbox": [ + 0.092, + 0.628, + 0.163, + 0.64 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.642, + 0.353, + 0.657 + ], + "angle": 0, + "content": "Yali Huang\\(^{1}\\) (hyl2024@gs.zzu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.658, + 0.34, + 0.673 + ], + "angle": 0, + "content": "Jie Mei\\(^{1}\\) (mj123123@gs.zzu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.673, + 0.374, + 0.688 + ], + "angle": 0, + "content": "Yiming Yang1 (yangyim637@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.688, + 0.384, + 0.702 + ], + "angle": 0, + "content": "Mi Guo\\(^{1}\\) (mimi987836724@gs.zzu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.702, + 0.367, + 0.718 + ], + "angle": 0, + "content": "Mingyuan Jiu\\(^{1,2,3}\\) (iemyjiu@zzu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.718, + 0.409, + 0.733 + ], + "angle": 0, + "content": "Mingliang Xu\\(^{1,2,3}\\) (iexumingliang@zzu.edu.cn)" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.734, + 0.175, + 0.747 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.748, + 0.483, + 0.778 + ], + "angle": 0, + "content": "\\(^{1}\\) School of Computer and Artificial Intelligence, Zhengzhou University" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.779, + 0.483, + 0.809 + ], + "angle": 0, + "content": "\\(^{2}\\) Engineering Research Center of Intelligent Swarm Systems, Ministry of Education, Zhengzhou University" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.809, + 0.421, + 0.824 + ], + "angle": 0, + "content": "\\(^{3}\\) National SuperComputing Center in Zhengzhou" + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.748, + 0.483, + 0.824 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.848, + 0.131, + 0.862 + ], + "angle": 0, + "content": "MM" + }, + { + "type": "text", + "bbox": [ + 
0.091, + 0.871, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Title: DFE-ViT: Dual Feature Enhancement Network for Cross-Domain Few-Shot Object Detection." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.092, + 0.586, + 0.105 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.107, + 0.839, + 0.122 + ], + "angle": 0, + "content": "Maomao Xiong\\(^{1}\\) (202314866@mail.sdu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.123, + 0.828, + 0.137 + ], + "angle": 0, + "content": "Qunshu Zhang\\(^{1}\\) (202414859@mail.sdu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.138, + 0.799, + 0.152 + ], + "angle": 0, + "content": "Xinyu Cao\\(^{1}\\) (202414842@mail.sdu.edu.cn)" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.153, + 0.599, + 0.166 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.167, + 0.668, + 0.183 + ], + "angle": 0, + "content": "1 Shandong University" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.206, + 0.553, + 0.22 + ], + "angle": 0, + "content": "FSV" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.228, + 0.905, + 0.258 + ], + "angle": 0, + "content": "Title: Enhanced Prototype-based Cross-domain Few-shot Object Detection" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.26, + 0.586, + 0.272 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.274, + 0.791, + 0.289 + ], + "angle": 0, + "content": "Yuqing Yang1 (yyqyang101@gmail.com)" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.29, + 0.598, + 0.304 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.304, + 0.698, + 0.32 + ], + "angle": 0, + "content": "1 George Mason University" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.344, + 0.551, + 0.357 + ], + "angle": 0, + "content": "IPC" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.366, + 0.905, + 0.395 + 
], + "angle": 0, + "content": "Title: Test-time Adaptation Strategy for Cross-Domain Few-Shot Object Detection" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.397, + 0.586, + 0.409 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.411, + 0.822, + 0.426 + ], + "angle": 0, + "content": "Dianmo Sheng\\(^{1}\\) (dmsheng@mail.ustc.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.426, + 0.619, + 0.441 + ], + "angle": 0, + "content": "Xuanpu Zhao1," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.441, + 0.588, + 0.456 + ], + "angle": 0, + "content": "Zhiyu Li1," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.456, + 0.613, + 0.472 + ], + "angle": 0, + "content": "Xuyang Ding" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.472, + 0.598, + 0.486 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.487, + 0.84, + 0.502 + ], + "angle": 0, + "content": "1 University of Science and Technology of China" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.526, + 0.552, + 0.541 + ], + "angle": 0, + "content": "LJY" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.548, + 0.905, + 0.578 + ], + "angle": 0, + "content": "Title: Similarity-Calibrated Prototype Refinement for Cross-Domain Few-Shot Object Detection" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.58, + 0.586, + 0.592 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.594, + 0.771, + 0.609 + ], + "angle": 0, + "content": "Wenqian Li (wenqianli.li@seu.edu.cn)" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.61, + 0.598, + 0.624 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.625, + 0.655, + 0.64 + ], + "angle": 0, + "content": "Southeast University" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": 
"title", + "bbox": [ + 0.093, + 0.091, + 0.188, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.115, + 0.484, + 0.17 + ], + "angle": 0, + "content": "[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025.8, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.171, + 0.484, + 0.213 + ], + "angle": 0, + "content": "[2] Weilin Cai, Juyong Jiang, Fan Wang, Jing Tang, Sunghun Kim, and Jiayi Huang. A survey on mixture of experts. arXiv preprint arXiv:2407.06204, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.215, + 0.484, + 0.295 + ], + "angle": 0, + "content": "[3] Zheng Chen, Kai Liu, Jue Gong, Jingkai Wang, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. Ntire 2025 challenge on image super-resolution \\((\\times 4)\\): Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.298, + 0.484, + 0.38 + ], + "angle": 0, + "content": "[4] Zheng Chen, Jingkai Wang, Kai Liu, Jue Gong, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. Ntire 2025 challenge on real-world face restoration: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.382, + 0.484, + 0.437 + ], + "angle": 0, + "content": "[5] Marcos Conde, Radu Timofte, et al. Ntire 2025 challenge on raw image restoration and super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.438, + 0.484, + 0.494 + ], + "angle": 0, + "content": "[6] Marcos Conde, Radu Timofte, et al. 
Raw image reconstruction from RGB on smartphones. ntire 2025 challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.495, + 0.484, + 0.591 + ], + "angle": 0, + "content": "[7] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies, volume 1 (long and short papers), pages 4171–4186, 2019. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.593, + 0.484, + 0.633 + ], + "angle": 0, + "content": "[8] Geir Drange. Arthropod taxonomy orders object detection dataset. In https://doi.org/10.34740/kaggle/dsv/1240192, 2019.2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.635, + 0.484, + 0.746 + ], + "angle": 0, + "content": "[9] Egor Ershov, Sergey Korchagin, Alexei Khalin, Artyom Panshin, Arseniy Terekhin, Ekaterina Zaychenkova, Georgiy Lobarev, Vsevolod Plokhotnyuk, Denis Abramov, Elisey Zhdanov, Sofia Dorogova, Yasin Mamedov, Nikola Banic, Georgii Perevozhikov, Radu Timofte, et al. Ntire 2025 challenge on night photography rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.747, + 0.483, + 0.801 + ], + "angle": 0, + "content": "[10] William Fedus, Barret Zoph, and Noam Shazeer. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. Journal of Machine Learning Research, 23(120):1-39, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.803, + 0.483, + 0.871 + ], + "angle": 0, + "content": "[11] Shenghao Fu, Qize Yang, Qijie Mo, Junkai Yan, Xihan Wei, Jingke Meng, Xiaohua Xie, and Wei-Shi Zheng. 
Llmdet: Learning strong open-vocabulary object detectors under the supervision of large language models. arXiv preprint arXiv:2501.18954, 2025. 8, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.873, + 0.483, + 0.901 + ], + "angle": 0, + "content": "[12] Yuqian Fu, Yanwei Fu, and Yu-Gang Jiang. Meta-fdmixup: Cross-domain few-shot learning guided by labeled target" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.115, + 0.484, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.121 + ], + "angle": 0, + "content": "data. In Proceedings of the 29th ACM international conference on multimedia, pages 5326-5334, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.122, + 0.907, + 0.188 + ], + "angle": 0, + "content": "[13] Yuqian Fu, Yu Xie, Yanwei Fu, Jingjing Chen, and Yu-Gang Jiang. Me-d2n: Multi-expert domain decompositional network for cross-domain few-shot learning. In Proceedings of the 30th ACM international conference on multimedia, pages 6609-6617, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.19, + 0.905, + 0.232 + ], + "angle": 0, + "content": "[14] Yuqian Fu, Yu Xie, Yanwei Fu, and Yu-Gang Jiang. Styleadv: Meta style adversarial training for cross-domain few-shot learning. In CVPR, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.233, + 0.905, + 0.314 + ], + "angle": 0, + "content": "[15] Yuqian Fu, Yu Wang, Yixuan Pan, Lian Huai, Xingyu Qiu, Zeyu Shangguan, Tong Liu, Yanwei Fu, Luc Van Gool, and Xingqun Jiang. Cross-domain few-shot object detection via enhanced open-set object detector. In European Conference on Computer Vision, pages 247-264. Springer, 2024. 1, 2, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.316, + 0.905, + 0.399 + ], + "angle": 0, + "content": "[16] Yuqian Fu, Xingyu Qiu, Bin Ren Yanwei Fu, Radu Timofte, Nicu Sebe, Ming-Hsuan Yang, Luc Van Gool, et al. 
Ntire 2025 challenge on cross-domain few-shot object detection: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.4, + 0.905, + 0.481 + ], + "angle": 0, + "content": "[17] Golnaz Ghiasi, Yin Cui, Aravind Srinivas, Rui Qian, Tsung-Yi Lin, Ekin D Cubuk, Quoc V Le, and Barret Zoph. Simple copy-paste is a strong data augmentation method for instance segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2918-2928, 2021. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.484, + 0.905, + 0.567 + ], + "angle": 0, + "content": "[18] Yunhui Guo, Noel C Codella, Leonid Karlinsky, James V Codella, John R Smith, Kate Saenko, Tajana Rosing, and Rogerio Feris. A broader study of cross-domain few-shot learning. In Computer vision-ECCV 2020: 16th European conference, Glasgow, UK, August 23-28, 2020, proceedings, part XXVII 16, pages 124-141. Springer, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.568, + 0.905, + 0.648 + ], + "angle": 0, + "content": "[19] Shuhao Han, Haotian Fan, Fangyuan Kong, Wenjie Liao, Chunle Guo, Chongyi Li, Radu Timofte, et al. Ntire 2025 challenge on text to image generation model quality assessment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.65, + 0.905, + 0.718 + ], + "angle": 0, + "content": "[20] Meng-Ru Hsieh, Yen-Liang Lin, and Winston H Hsu. Drone-based object counting by spatially regularized regional proposal network. In Proceedings of the IEEE international conference on computer vision, pages 4145-4153, 2017. 
1, 2, 8, 9, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.72, + 0.905, + 0.774 + ], + "angle": 0, + "content": "[21] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. *ICLR*, 1(2):3, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.776, + 0.905, + 0.845 + ], + "angle": 0, + "content": "[22] Gabriel Ilharco, Mitchell Wortsman, Ross Wightman, Cade Gordon, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, Hongseok Namkoong, John Miller, Hannaneh Hajishirzi, Ali Farhadi, and Ludwig Schmidt. Openclip, 2021. 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.846, + 0.905, + 0.899 + ], + "angle": 0, + "content": "[23] Naoto Inoue, Ryosuke Furuta, Toshihiko Yamasaki, and Kiyoharu Aizawa. Cross-domain weakly-supervised object detection through progressive domain adaptation. In CVPR, 2018. 2" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.907, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.093, + 0.486, + 0.176 + ], + "angle": 0, + "content": "[24] Varun Jain, Zongwei Wu, Quan Zou, Louis Florentin, Henrik Turbell, Sandeep Siddhartha, Radu Timofte, et al. Ntire 2025 challenge on video quality enhancement for video conferencing: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.177, + 0.483, + 0.245 + ], + "angle": 0, + "content": "[25] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.247, + 0.483, + 0.302 + ], + "angle": 0, + "content": "[26] Lihao Jiang, Yi Wang, Qi Jia, Shengwei Xu, Yu Liu, Xin Fan, Haojie Li, Risheng Liu, Xinwei Xue, and Ruili Wang. Underwater species detection using channel sharpening attention. In ACM MM, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.303, + 0.483, + 0.371 + ], + "angle": 0, + "content": "[27] Aishwarya Kamath, Mannat Singh, Yann LeCun, Gabriel Synnaeve, Ishan Misra, and Nicolas Carion. Mdetr-modulated detection for end-to-end multi-modal understanding. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1780-1790, 2021. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.372, + 0.483, + 0.426 + ], + "angle": 0, + "content": "[28] Mona Köhler, Markus Eisenbach, and Horst-Michael Gross. Few-shot object detection: A comprehensive survey. IEEE Transactions on Neural Networks and Learning Systems, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.428, + 0.483, + 0.509 + ], + "angle": 0, + "content": "[29] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123:32-73, 2017. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.511, + 0.483, + 0.567 + ], + "angle": 0, + "content": "[30] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In Advances in Neural Information Processing Systems, pages 1097-1105, 2012." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.568, + 0.483, + 0.663 + ], + "angle": 0, + "content": "[31] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, et al. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. International journal of computer vision, 128(7):1956-1981, 2020. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.665, + 0.483, + 0.748 + ], + "angle": 0, + "content": "[32] Sangmin Lee, Eunpil Park, Angel Canelo, Hyunhee Park, Youngjo Kim, Hyungju Chun, Xin Jin, Chongyi Li, Chun-Le Guo, Radu Timofte, et al. Ntire 2025 challenge on efficient burst hdr and restoration: Datasets, methods, and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.748, + 0.483, + 0.829 + ], + "angle": 0, + "content": "[33] Chunyuan Li, Haotian Liu, Liunian Li, Pengchuan Zhang, Jyoti Aneja, Jianwei Yang, Ping Jin, Houdong Hu, Zicheng Liu, Yong Jae Lee, et al. Elevater: A benchmark and toolkit for evaluating language-augmented visual models. Advances in Neural Information Processing Systems, 35:9287-9301, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.831, + 0.483, + 0.873 + ], + "angle": 0, + "content": "[34] Ke Li, Gang Wan, Gong Cheng, Liqui Meng, and Junwei Han. Object detection in optical remote sensing images: A survey and a new benchmark. ISPRS, 2020. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.874, + 0.483, + 0.903 + ], + "angle": 0, + "content": "[35] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.093, + 0.486, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.149 + ], + "angle": 0, + "content": "Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language-image pre-training. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10965-10975, 2022. 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.151, + 0.905, + 0.207 + ], + "angle": 0, + "content": "[36] Wei-Hong Li, Xialei Liu, and Hakan Bilen. Cross-domain few-shot learning with task-specific adapters. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7161-7170, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.208, + 0.905, + 0.305 + ], + "angle": 0, + "content": "[37] Xin Li, Yeying Jin, Xin Jin, Zongwei Wu, Bingchen Li, Yufei Wang, Wenhan Yang, Yu Li, Zhibo Chen, Bihan Wen, Robby Tan, Radu Timofte, et al. Ntire 2025 challenge on day and night raindrop removal for dual-focused images: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.307, + 0.905, + 0.403 + ], + "angle": 0, + "content": "[38] Xin Li, Xijun Wang, Bingchen Li, Kun Yuan, Yizhen Shao, Suhang Yao, Ming Sun, Chao Zhou, Radu Timofte, and Zhibo Chen. Ntire 2025 challenge on short-formUGC video quality assessment and enhancement: Kwaisr dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.406, + 0.905, + 0.503 + ], + "angle": 0, + "content": "[39] Xin Li, Kun Yuan, Bingchen Li, Fengbin Guan, Yizhen Shao, Zihao Yu, Xijun Wang, Yiting Lu, Wei Luo, Suhang Yao, Ming Sun, Chao Zhou, Zhibo Chen, Radu Timofte, et al. Ntire 2025 challenge on short-formUGC video quality assessment and enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.505, + 0.905, + 0.588 + ], + "angle": 0, + "content": "[40] Jie Liang, Radu Timofte, Qiaosi Yi, Zhengqiang Zhang, Shuaizheng Liu, Lingchen Sun, Rongyuan Wu, Xindong Zhang, Hui Zeng, Lei Zhang, et al. Ntire 2025 the 2nd restore any image model (RAIM) in the wild challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.59, + 0.905, + 0.674 + ], + "angle": 0, + "content": "[41] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer vision-ECCV 2014: 13th European conference, zurich, Switzerland, September 6-12, 2014, proceedings, part v 13, pages 740-755. Springer, 2014. 1, 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.676, + 0.905, + 0.744 + ], + "angle": 0, + "content": "[42] Aixin Liu, Bei Feng, Bin Wang, Bingxuan Wang, Bo Liu, Chenggang Zhao, Chengqi Dengr, Chong Ruan, Damai Dai, Daya Guo, et al. Deepseek-v2: A strong, economical, and efficient mixture-of-experts language model. arXiv preprint arXiv:2405.04434, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.747, + 0.905, + 0.83 + ], + "angle": 0, + "content": "[43] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Qing Jiang, Chunyuan Li, Jianwei Yang, Hang Su, et al. 
Grounding dino: Marrying dino with grounded pre-training for open-set object detection. In European Conference on Computer Vision, pages 38-55. Springer, 2024. 4, 6, 7, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.832, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[44] Xiaohong Liu, Xiongkuo Min, Qiang Hu, Xiaoyun Zhang, Jie Guo, et al. Ntire 2025 XGC quality assessment challenge: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.093, + 0.482, + 0.176 + ], + "angle": 0, + "content": "[45] Xiaoning Liu, Zongwei Wu, Florin-Alexandru Vasluianu, Hailong Yan, Bin Ren, Yulun Zhang, Shuhang Gu, Le Zhang, Ce Zhu, Radu Timofte, et al. Ntire 2025 challenge on low light image enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.177, + 0.482, + 0.245 + ], + "angle": 0, + "content": "[46] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 7, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.247, + 0.482, + 0.299 + ], + "angle": 0, + "content": "[47] Alexander Neubeck and Luc Van Gool. Efficient nonmaximum suppression. In 18th international conference on pattern recognition (ICPR'06), pages 850-855. IEEE, 2006. 
7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.302, + 0.482, + 0.37 + ], + "angle": 0, + "content": "[48] Maxime Oquab, Timothee Darct, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.372, + 0.482, + 0.426 + ], + "angle": 0, + "content": "[49] Vicente Ordonez, Girish Kulkarni, and Tamara Berg. Im2text: Describing images using 1 million captioned photographs. Advances in neural information processing systems, 24, 2011. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.428, + 0.482, + 0.482 + ], + "angle": 0, + "content": "[50] Hongpeng Pan, Shifeng Yi, Shouwei Yang, Lei Qi, Bing Hu, Yi Xu, and Yang Yang. The solution for cvpr2024 foundational few-shot object detection challenge. arXiv preprint arXiv:2406.12225, 2024. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.484, + 0.482, + 0.55 + ], + "angle": 0, + "content": "[51] Jiancheng Pan, Yanxing Liu, Yuqian Fu, Muyuan Ma, Jiaohao Li, Danda Pani Paudel, Luc Van Gool, and Xiaomeng Huang. Locate anything on earth: Advancing open-vocabulary object detection for remote sensing community, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.553, + 0.482, + 0.594 + ], + "angle": 0, + "content": "[52] Jiancheng Pan, Muyuan Ma, Qing Ma, Cong Bai, and Shengyong Chen. Pir: Remote sensing image-text retrieval with prior instruction representation learning, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.596, + 0.482, + 0.636 + ], + "angle": 0, + "content": "[53] Limeng Qiao, Yuxuan Zhao, Zhiyuan Li, Xi Qiu, Jianan Wu, and Chi Zhang. Defrcn: Decoupled faster r-cnn for few-shot object detection. In ICCV, 2021. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.638, + 0.482, + 0.718 + ], + "angle": 0, + "content": "[54] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.721, + 0.482, + 0.789 + ], + "angle": 0, + "content": "[55] Bin Ren, Yahui Liu, Yue Song, Wei Bi, Rita Cucchiara, Nicu Sebe, and Wei Wang. Masked jigsaw puzzle: A versatile position embedding for vision transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20382-20391, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.79, + 0.482, + 0.858 + ], + "angle": 0, + "content": "[56] Bin Ren, Yawei Li, Jingyun Liang, Rakesh Ranjan, Mengyuan Liu, Rita Cucchiara, Luc V Gool, Ming-Hsuan Yang, and Nicu Sebe. Sharing key semantics in transformer makes efficient image restoration. Advances in Neural Information Processing Systems, 37:7427-7463, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.86, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[57] Bin Ren, Hang Guo, Lei Sun, Zongwei Wu, Radu Timofte, Yawei Li, et al. The tenth nitre 2025 efficient superresolution challenge report. In Proceedings of the IEEE/CVF" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.093, + 0.482, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.093, + 0.905, + 0.12 + ], + "angle": 0, + "content": "Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.123, + 0.905, + 0.19 + ], + "angle": 0, + "content": "[58] Tianhe Ren, Qing Jiang, Shilong Liu, Zhaoyang Zeng, Wenlong Liu, Han Gao, Hongjie Huang, Zhengyu Ma, Xiaoke Jiang, Yihao Chen, et al. Grounding dino 1.5: Advance the\" edge\" of open-set object detection. arXiv preprint arXiv:2405.10300, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.193, + 0.905, + 0.247 + ], + "angle": 0, + "content": "[59] Xiaoqian Ruan and Wei Tang. Fully test-time adaptation for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1038-1047, 2024. 15" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.25, + 0.905, + 0.304 + ], + "angle": 0, + "content": "[60] Inkyu Sa, Zongyuan Ge, Feras Dayoub, Ben Upcroft, Tristan Perez, and Chris McCool. Deepfruits: A fruit detection system using deep neural networks. sensors, 16(8):1222, 2016. 1, 2, 8, 9, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.307, + 0.905, + 0.389 + ], + "angle": 0, + "content": "[61] Nickolay Safonov, Alexey Bryntsev, Andrey Moskalenko, Dmitry Kulikov, Dmitriy Vatolin, Radu Timofte, et al. Ntire 2025 challenge on UGC video enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.391, + 0.905, + 0.446 + ], + "angle": 0, + "content": "[62] Alzayat Saleh, Issam H Laradji, Dmitry A Konovalov, Michael Bradley, David Vazquez, and Marcus Sheaves. A realistic fish-habitat dataset to evaluate algorithms for underwater visual analysis. Scientific Reports, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.448, + 0.905, + 0.503 + ], + "angle": 0, + "content": "[63] Zeyu Shangguan and Mohammad Rostami. Identification of novel classes for improving few-shot object detection. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3356-3366, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.505, + 0.905, + 0.545 + ], + "angle": 0, + "content": "[64] Zeyu Shangguan and Mohammad Rostami. Improved region proposal network for enhanced few-shot object detection. Neural Networks, 180:106699, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.548, + 0.905, + 0.616 + ], + "angle": 0, + "content": "[65] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 8430-8439, 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.619, + 0.905, + 0.659 + ], + "angle": 0, + "content": "[66] Jake Snell, Kevin Swersky, and Richard Zemel. Prototypical networks for few-shot learning. Advances in neural information processing systems, 30, 2017. 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.661, + 0.905, + 0.702 + ], + "angle": 0, + "content": "[67] Kechen Song and Yunhui Yan. A noise robust method based on completed local binary patterns for hot-rolled steel strip surface defects. Applied Surface Science, 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.705, + 0.905, + 0.746 + ], + "angle": 0, + "content": "[68] Bo Sun, Banghuai Li, Shengcai Cai, Ye Yuan, and Chi Zhang. Fsce: Few-shot object detection via contrastive proposal encoding. In CVPR, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.748, + 0.907, + 0.83 + ], + "angle": 0, + "content": "[69] Lei Sun, Andrea Alfarano, Peiqi Duan, Shaolin Su, Kaiwei Wang, Boxin Shi, Radu Timofte, Danda Pani Paudel, Luc Van Gool, et al. Ntire 2025 challenge on event-based image deblurring: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.832, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[70] Lei Sun, Hang Guo, Bin Ren, Luc Van Gool, Radu Timofte, Yawei Li, et al. The tenth nitre 2025 image denoising challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.907, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.093, + 0.482, + 0.146 + ], + "angle": 0, + "content": "[71] Hao Tang, Chengcheng Yuan, Zechao Li, and Jinhui Tang. Learning attention-guided pyramidal features for few-shot fine-grained recognition. Pattern Recognition, 130:108792, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.15, + 0.482, + 0.205 + ], + "angle": 0, + "content": "[72] Hung-Yu Tseng, Hsin-Ying Lee, Jia-Bin Huang, and Ming-Hsuan Yang. Cross-domain few-shot classification via learned feature-wise transformation. arXiv preprint arXiv:2001.08735, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.208, + 0.482, + 0.276 + ], + "angle": 0, + "content": "[73] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Cailian Chen, Zongwei Wu, Radu Timofte, et al. Ntire 2025 image shadow removal challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.279, + 0.482, + 0.347 + ], + "angle": 0, + "content": "[74] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Zongwei Wu, Radu Timofte, et al. Ntire 2025 ambient lighting normalization challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.349, + 0.482, + 0.39 + ], + "angle": 0, + "content": "[75] Xin Wang, Thomas E Huang, Trevor Darrell, Joseph E Gonzalez, and Fisher Yu. Frustratingly simple few-shot object detection. arXiv preprint arXiv:2003.06957, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.392, + 0.482, + 0.447 + ], + "angle": 0, + "content": "[76] Xinkuang Wang, Wenjing Li, and Zhongcheng Wu. Cardd: A new dataset for vision-based car damage detection. IEEE Transactions on Intelligent Transportation Systems, 24(7): 7202-7214, 2023. 1, 2, 9, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.45, + 0.482, + 0.532 + ], + "angle": 0, + "content": "[77] Yingqian Wang, Zhengyu Liang, Fengyuan Zhang, Lvli Tian, Longguang Wang, Juncheng Li, Jungang Yang, Radu Timofte, Yulan Guo, et al. Ntire 2025 challenge on light field image super-resolution: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.535, + 0.482, + 0.603 + ], + "angle": 0, + "content": "[78] Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chau-mond, Clement Delangue, Anthony Moi, Pierrick Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, et al. Huggingface's transformers: State-of-the-art natural language processing. arXiv preprint arXiv:1910.03771, 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.606, + 0.482, + 0.659 + ], + "angle": 0, + "content": "[79] Dongxian Wu, Shu-Tao Xia, and Yisen Wang. Adversarial weight perturbation helps robust generalization. Advances in neural information processing systems, 33:2958-2969, 2020. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.662, + 0.482, + 0.717 + ], + "angle": 0, + "content": "[80] Fuzhao Xue, Zian Zheng, Yao Fu, Jinjie Ni, Zangwei Zheng, Wangchunshu Zhou, and Yang You. Openmoe: An early effort on open mixture-of-experts language models. 
arXiv preprint arXiv:2402.01739, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.72, + 0.482, + 0.815 + ], + "angle": 0, + "content": "[81] Kangning Yang, Jie Cai, Ling Ouyang, Florin-Alexandru Vasluianu, Radu Timofte, Jiaming Ding, Huiming Sun, Lan Fu, Jinlong Li, Chiu Man Ho, Zibo Meng, et al. Ntire 2025 challenge on single image reflection removal in the wild: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.818, + 0.482, + 0.9 + ], + "angle": 0, + "content": "[82] Pierluigi Zama Ramirez, Fabio Tosi, Luigi Di Stefano, Radu Timofte, Alex Costanzino, Matteo Poggi, Samuele Salti, Stefano Mattoccia, et al. Ntire 2025 challenge on hr depth from images of specular and transparent surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.093, + 0.482, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.16 + ], + "angle": 0, + "content": "[83] Zican Zha, Hao Tang, Yunlian Sun, and Jinhui Tang. Boosting few-shot fine-grained recognition with background suppression and foreground alignment. IEEE Transactions on Circuits and Systems for Video Technology, 33(8):3947-3961, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.164, + 0.905, + 0.232 + ], + "angle": 0, + "content": "[84] Ji Zhang, Jingkuan Song, Lianli Gao, and Hengtao Shen. Free-lunch for cross-domain few-shot learning: Style-aware episodic training with robust contrastive learning. In Proceedings of the 30th ACM international conference on multimedia, pages 2586-2594, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.234, + 0.905, + 0.275 + ], + "angle": 0, + "content": "[85] Xinyu Zhang, Yuhan Liu, Yuting Wang, and Abdeslam Boularias. 
Detect everything with few examples. arXiv preprint arXiv:2309.12969, 2023. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.278, + 0.905, + 0.343 + ], + "angle": 0, + "content": "[86] Linhai Zhuo, Yuqian Fu, Jingjing Chen, Yixin Cao, and YuGang Jiang. Tgdm: Target guided dynamic mixup for cross-domain few-shot learning. In Proceedings of the 30th ACM International Conference on Multimedia, pages 6368-6376, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.347, + 0.905, + 0.416 + ], + "angle": 0, + "content": "[87] Linhai Zhuo, Yuqian Fu, Jingjing Chen, Yixin Cao, and YuGang Jiang. Unified view empirical study for large pretrained model on cross-domain few-shot learning. ACM Transactions on Multimedia Computing, Communications and Applications, 20(9):1-18, 2024. 1" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.416 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "22" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10685/2b7c0cf2-f712-45f3-86c5-afe1fcf3d48b_origin.pdf b/data/2025/2504_10xxx/2504.10685/2b7c0cf2-f712-45f3-86c5-afe1fcf3d48b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7205bafbe11bf59eb14889664482dff874131c60 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/2b7c0cf2-f712-45f3-86c5-afe1fcf3d48b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff59834efb6a13186bd43413fc3f7a21ac4621016583a57408f9d0146e43a2cd +size 14661352 diff --git a/data/2025/2504_10xxx/2504.10685/full.md b/data/2025/2504_10xxx/2504.10685/full.md new file mode 100644 index 0000000000000000000000000000000000000000..023c118d9672f4c4fe4e6bcf62c8c6b9c7870cf2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/full.md @@ -0,0 +1,890 @@ +# NTIRE 2025 Challenge on Cross-Domain Few-Shot Object Detection: Methods and Results + +Yuqian Fu\* Xingyu 
Qiu\* Bin Ren\* Yanwei Fu\* Radu Timofte\* Nicu Sebe\* Ming-Hsuan Yang\* Luc Van Gool\* Kaijin Zhang Qingpeng Nong Xiugang Dong Hong Gao Xiangsheng Zhou Jiancheng Pan Yanxing Liu Xiao He Jiahao Li Yuze Sun Xiaomeng Huang Zhenyu Zhang Ran Ma Yuhan Liu Zijian Zhuang Shuai Yi Yixiong Zou Lingyi Hong Mingxi Chen Runze Li Xingdong Sheng Wenqiang Zhang Weisen Chen Yongxin Yan Xinguo Chen Yuanjie Shao Zhengrong Zuo Nong Sang Hao Wu Haoran Sun Shuming Hu Yan Zhang Zhiguang Shi Yu Zhang Chao Chen Tao Wang Da Feng Linhai Zhuo Ziming Lin Yali Huang Jie Me Yiming Yang Mi Guo Mingyuan Jiu Mingliang Xu Maomao Xiong Qunshu Zhang Xinyu Cao Yuqing Yang Dianmo Sheng Xuanpu Zhao Zhiyu Li Xuyang Ding Wenqian Li + +# Abstract + +Cross-Domain Few-Shot Object Detection (CD-FSOD) poses significant challenges to existing object detection and few-shot detection models when applied across domains. In conjunction with NTIRE 2025, we organized the 1st CD-FSOD Challenge, aiming to advance the performance of current object detectors on entirely novel target domains with only limited labeled data. The challenge attracted 152 registered participants, received submissions from 42 teams, and concluded with 13 teams making valid final submissions. Participants approached the task from diverse perspectives, proposing novel models that achieved new state-of-the-art (SOTA) results under both open-source and closed-source settings. In this report, we present an overview of the 1st NTIRE 2025 CD-FSOD Challenge, highlighting the proposed solutions and summarizing the results submitted by the participants. + +# 1. Introduction + +Few-shot object detection (FSOD) [28] aims at allowing models to detect novel objects using minimal labeled examples. While significant progress has been made, existing FSOD methods [53, 63, 64, 68, 75, 85] typically as + +sume that the training (source) and testing (target) data are drawn from the same domain. However, this assumption rarely holds in real-world applications. 
For instance, a model trained on natural images such as those in MS-COCO [41] may face substantial challenges when applied to a novel domain like remote sensing imagery. This cross-domain few-shot learning (CD-FSL) problem has attracted considerable attention in the context of classification [12-14, 18, 36, 55, 56, 71, 72, 83, 84, 86, 87], whereas its extension to object detection—i.e., cross-domain few-shot object detection (CD-FSOD)—remains much less explored. + +Upon gaping at this gap, one recent work, CD-ViTO [15], reveals that the different object detection datasets exhibit various characters in style, inter-class variance (ICV), and indefinable boundaries (IB). To further investigate how these factors affect the CD-FSOD, CD-ViTO thus proposes a new benchmark which takes MS-COCO as the source domain and six distinct datasets with diverse style, ICV, IB as unseen targets. Results indicate that the prior detectors all fail to generalize to those targets when the domain gap issue is observed. + +To further promote the advances on CD-FSOD, we newly introduce three more unseen targets, DeepFruits [60], Carpk [20], and CarDD [76] as testbeds for the CD-FSOD detectors. Following the observations in CD-ViTO, these three targets have domains different from the source data, with varying styles, ICV, and IB. Furthermore, to maximally boost the performance of models, we define the task setting proposed in CD-ViTO as closed-source CD-FSOD, while further introducing the new open-source CD-FSOD + +setting. To be specific, the closed-source setting means the source data for model training is strictly limited, e.g., MS-COCO as in CD-ViTO; while the open-source setting relaxes this limitation and allows the participants to leverage diverse knowledge sources and foundation models to explore the upper bound on the target domains. 
+ +In collaboration with the 2025 New Trends in Image Restoration and Enhancement (NTIRE 2025) Workshop, which is particularly interested in the model robustness under changing conditions, we present the 1st CD-FSOD Challenge. It features an open-source CD-FSOD as the main track and a closed-source CD-FSOD as a special track. For the closed-source track, MS-COCO serves as the sole source domain. The validation phase includes six target domains proposed in CD-ViTO. Three additional novel domains are used as the final test sets for both tracks. Mean Average Precision (mAP) is employed as the ranking metric. We believe this challenge will drive progress in the CD-FSOD field and foster meaningful algorithmic innovations. + +This challenge is one of the NTIRE $2025^{1}$ Workshop associated challenges on: ambient lighting normalization [74], reflection removal in the wild [81], shadow removal [73], event-based image deblurring [69], image denoising [70], XGC quality assessment [44], UGC video enhancement [61], night photography rendering [9], image super-resolution (x4) [3], real-world face restoration [4], efficient super-resolution [57], HR depth estimation [82], efficient burst HDR and restoration [32], cross-domain few-shot object detection [16], short-form UGC video quality assessment and enhancement [38, 39], text to image generation model quality assessment [19], day and night raindrop removal for dual-focused images [37], video quality assessment for video conferencing [24], low light image enhancement [45], light field super-resolution [77], restore any image model (RAIM) in the wild [40], raw restoration and super-resolution [5], and raw reconstruction from RGB on smartphones [6]. + +# 2. NTIRE 2025 CD-FSOD Challenge + +# 2.1. Challenge Overview + +Our challenge aims to advance Cross-Domain Few-Shot Object Detection (CD-FSOD) — detecting objects under domain shifts with limited labeled data. 
We use six previously published target domains [15] as validation sets and introduce three newly constructed datasets for final testing. Beyond the dataset update, we introduce open-source CD-FSOD as a new setting, allowing participants to freely choose source datasets and pre-trained models to enhance generalization. Fig. 1 illustrates both the predefined closed-source CD-FSOD and the new open-source CD-FSOD settings, along with the newly introduced target domains. + +# 2.2. Task Formulations + +Closed-Source CD-FSOD. Given a source dataset $\mathcal{D}_S$ and a novel target dataset $\mathcal{D}_T$ , the closed-source CD-FSOD track assumes that the source class set $\mathcal{C}_S$ and the target class set $\mathcal{C}_T$ are completely disjoint, i.e., $\mathcal{C}_S \cap \mathcal{C}_T = \emptyset$ . Additionally, the distributions of the source domain $\mathcal{D}_S$ and the target domain $\mathcal{D}_T$ are not identical. Participants are required to train models on $\mathcal{D}_S$ and test them on $\mathcal{D}_T$ , where each class in $\mathcal{C}_T$ has only a few labeled examples. Usually, $\mathcal{D}_S$ is a single dataset, as in CD-ViTO [15]. We refer to this setting as closed-source CD-FSOD to differentiate it from the open-source variant. + +Open-Source CD-FSOD. In contrast to the closed-source setting where training data is strictly limited, the open-source CD-FSOD track is designed to leverage the capabilities of foundation models. Since these models are pretrained on large-scale and diverse datasets, it is practically hard to trace all the knowledge embedded within them. Hence, we refer to this setting as open-source. While the relaxed constraints on source data make it difficult to strictly ensure non-overlapping classes between the source and target data, the track still focuses on addressing the core challenges of domain shift and few-shot object detection. 
We believe this setting will significantly accelerate the development of CD-FSOD methods for real-world applications. + +In this challenge, the open-source CD-FSOD is designated as the main track, with awards presented to the top three teams. The closed-source CD-FSOD serves as the special track, with a single award granted to the top-performing team. + +$N$ -way $K$ -shot Protocol. We adopt the $N$ -way $K$ -shot evaluation protocol. For each novel class in the target class set $\mathcal{C}_T$ , $K$ labeled instances are provided, forming the support set $S$ . The remaining unlabeled instances constitute the query set $Q$ . Instances contained in the support set $S$ are used to assist the model in recognizing and detecting the objects in $Q$ . + +# 2.3. Challenge Phases and Datasets + +This challenge involves one development stage and one testing stage. The source data $\mathcal{D}_S$ for both stages is the same, i.e., MS-COCO [41] for the closed-source track and unlimited data for the open-source track. While the testing data $\mathcal{D}_T$ is different. + +Development Stage: Datasets proposed in the CD-ViTO, including ArTaxOr [8], Clipart1K [23], DIOR [34], Deep-Fish [62], NEU-DET [67], and UODD [26] are taken as targets $\mathcal{D}_T$ during development stage. + +Testing Stage. Three previously unseen datasets (DeepFruits [60], Carpk [20], and CarDD [76]) are introduced and used as the targets $\mathcal{D}_T$ for the final testing phase. Note that the ground truth annotations for these query sets are held exclusively by the challenge organizers. + +![](images/1bc0d3ab7ab85fb2208d5f61d937c337f5fbc4fba1fc2687c691f111074cfeb2.jpg) +Figure 1. Illustration of the challenge settings, including the closed-source and open-source CD-FSOD tracks. The three newly introduced target datasets used in the final testing phase are also shown. + +# 2.4. 
CD-ViTO Baseline Model + +We take CD-ViTO, the current State-of-the-art (SOTA) method under the closed-source setting, as the baseline for this challenge. Briefly, CD-ViTO is built upon DE-ViT [85], an open-set detector, and fine-tuned using the support set. As in Fig. 2, modules in blue are inherited from DE-ViT, while modules in orange are newly proposed. New improvements include learnable instance features, instance reweighting, domain prompter, and finetuning. + +![](images/db3a8fdfeee41c36fdb097170cdd5cbd99260e8264a29ec9a9b48b94f98c62f1.jpg) +Figure 2. Overall framework of CD-ViTO baseline method. + +Intuitively, the learnable instance feature module is designed to enhance inter-class variance (ICV) among different target classes by making the initially fixed instance features learnable and optimizing them through supervised few-shot detection tasks on the target support set. The instance reweighting module further improves prototype quality by assigning higher weights to high-quality object instances—e.g., those with minimal indefinable boundary (IB). These weights are learned via a lightweight MLP and fully connected layer, as illustrated in the upper part of Fig. 2(b). The domain prompter module introduces learnable domain perturbations to simulate varying domain styles. These perturbations are applied to object prototypes, followed by a prototype consistency loss to ensure that the introduced perturbations do not affect the seman- + +tic category of the prototypes. Simultaneously, a domain diversity loss encourages the generated domains to be sufficiently diverse. The lower part of Fig. 2(b) illustrates this mechanism. By injecting virtual domains and enforcing robustness against the induced perturbations, this strategy enhances the model's generalization under domain shifts. Finetuning is applied to the modules highlighted with fire icons in Fig. 2. + +# 2.5. 
Evaluation Protocol + +The final score is measured based on the model's performance on the three datasets of the testing stage. For each dataset, we validate the models on three different few-shot settings: 1-shot, 5-shot, and 10-shot. This results in a total of nine mean Average Precision (mAP) scores: D1_1shot, D1_5shot, D1_10shot; D2_1shot, D2_5shot, D2_10shot; and D3_1shot, D3_5shot, D3_10shot. The D1, D2, D3 denote the Deep-Fruits, Carpk, and CarDD, respectively. + +The final ranking score is computed as a weighted average avg() of these scores: + +$$ +\begin{array}{l} \text {S c o r e} = 2 * \text {a v g} (\mathrm {D} 1 _ {-} 1 \text {s h o t}, \mathrm {D} 2 _ {-} 1 \text {s h o t}, \mathrm {D} 3 _ {-} 1 \text {s h o t}) \\ + 1 * a v g (D 1 \_ 5 s h o t, D 2 \_ 5 s h o t, D 3 \_ 5 s h o t) \\ + 1 * a v g (D 1. 1 0 s h o t, D 2. 1 0 s h o t, D 3. 1 0 s h o t) \\ \end{array} +$$ + +Rationale for Weighted Scoring. We assign a higher weight $(\times 2)$ to the 1-shot setting for two primary reasons: (1) Performance in the 1-shot scenario is generally lower than in the 5-shot and 10-shot settings due to the limited availability of labeled examples for adaptation; and (2) emphasizing 1-shot performance encourages the development of models that are more robust and effective in extremely low-data conditions. + +Table 1. Open-source and closed-source results on CD-FSOD. D1, D2, and D3 represent DeepFruits, CARPK, and CarDD, respectively. Mean Average Precision (mAP) on 1-shot, 5-shot, and 10-shot are reported. Teams achieving top results are highlighted. + +
Main Open-Source Track
RankTeam NameScoreD1_1shotD1_5shotD1_10shotD2_1shotD2_5shotD2_10shotD3_1shotD3_5shotD3_10shot
1MoveFree231.0166.1864.5862.5760.4358.8959.0048.7549.2848.00
2AI4EarthLab215.9261.1965.4165.3559.1558.0559.0034.2143.8547.00
3IDCFS215.4863.3465.4164.7561.1460.4260.0032.3339.2443.00
4FDUROILab_Lenovo211.5561.2562.8964.6659.2459.2459.0035.1337.6340.00
5HUSTLab210.7863.7161.3257.1960.4260.4760.0031.0140.0943.00
6TongjiLab172.1442.3641.9041.7455.9555.9555.0031.4031.4031.00
7Manifold159.8632.0544.2844.2757.0657.0657.0018.7129.3432.00
8MXT108.2022.2640.5741.3421.1226.3430.2323.8128.0029.00
Special Closed-Source Track
RankTeam NameScoreD1_1shotD1_5shotD1_10shotD2_1shotD2_5shotD2_10shotD3_1shotD3_5shotD3_10shot
1X-Few125.9036.5846.9550.9823.0129.6828.0020.1129.6833.00
2MM117.3932.4745.2350.2318.8329.3628.0018.3129.1431.00
3FSV112.8131.2343.8949.3213.6926.0426.5919.7130.1633.17
4IPC105.6232.5847.1245.6413.4120.7713.0018.1829.9832.00
5LJY105.2833.5246.0445.3410.6811.4525.0018.3430.9432.00
/CD-ViTO Base [15]91.0027.9537.4243.586.7721.2824.0010.0726.4730.00
+ +# 3. Challenge Results + +Among the 152 registered participants, 8 and 5 teams have participated the final testing stage and submitted their results, codes, and factsheets. Table. 1 summarizes the results of these methods. Detailed descriptions of the participants' solutions are provided in Sec.4 and Sec.5, each corresponding to a different track. + +Open-Source Track Results. In the open-source track, nearly all participating teams achieved strong performance with clear improvements over the provided CD-ViTO baseline. This highlights not only the effectiveness of their proposed methods but also the significance of introducing this new task setting. As observed, relaxing the strict limitation on the source data offers a substantial advantage in tackling the CD-FSOD task. + +Specifically, the teams MoveFree, AI4EarthLab, and IDCFS emerged as the top performers in this track, achieving scores of 231.01, 215.92, and 215.48, respectively—significantly surpassing the baseline and other teams under the same track. + +Closed-Source Track Results. The performance achieved by the closed-source track teams is generally lower than that of the open-source track. This is quite understandable considering that the closed-source track enforces stricter constraints. Nevertheless, the participants managed to improve the baseline method clearly. + +In particular, the X-Few team stands out with a final score of 125.90, significantly outperforming other competitors. This shows that well-designed architectures and training strategies can still bring notable gains even without relying on large external models. Other teams in this track also delivered solid improvements. Their contributions are valuable in terms of enabling fair comparisons and emphasizing algorithmic annotations. + +# 4. Main Open-Source Track Methods + +# 4.1. MoveFree + +# 4.1.1. 
Proposed Method + +Open-set object detectors, such as [35], [43], and [58], are designed to detect objects based on arbitrary text descriptions. These models are typically pre-trained on large-scale, well-annotated datasets, ensuring strong alignment between textual and visual modalities. As a result, they exhibit remarkable zero-shot capabilities, allowing them to recognize and localize unseen object categories based solely on textual prompts. Given the strong generalization ability of such open-set detectors, this team believes that they are inherently well-suited for cross-domain few-shot object detection, as their robust pre-trained representations can be effectively adapted to new domains with minimal supervision. + +Thus, the MoveFree team focuses on leveraging and enhancing pre-trained open-set object detectors for CD-FSOD during the fine-tuning stage. The proposed approach introduces three key improvements: (1) To address the issue of missing annotations, self-training is introduced to iteratively refine the training data, thereby enhancing fine-tuning performance. (2) A Mixture-of-Experts (MoE) architecture is integrated into the open-set object detector to improve adaptability and robustness in the few-shot setting. (3) A two-stage fine-tuning pipeline is designed carefully. Code is made available2. + +Self-training Paradigm. According to the definition of few-shot object detection in CD-ViTO[15], $K$ -shot object detection refers to having $K$ labeled instances in the training data, rather than $K$ fully annotated images. This implies that instances of target categories may lack annotations in the provided training set. + +Upon careful investigation, this team identified that the issue of incomplete annotations is prominent across all three test datasets in this challenge. Drawing on their expertise in developing open-set object detectors, the team recognized that missing annotations for target categories can significantly degrade model performance. 
This degradation occurs because the loss function penalizes the model for correctly detecting unannotated objects, mistakenly treating them as false positives due to their absence in the ground truth labels. Therefore, this team employs a self-training strategy during the fine-tuning stage of Grounding DINO to iteratively refine the annotations in the training data. Specifically, Grounding DINO periodically generates predictions on the training set, which are then incorporated as additional annotations. This iterative process gradually improves the quality of the training data, ultimately leading to enhanced model performance. + +The substitution of the Mixture-of-Experts (MoE). In few-shot object detection, the availability of training data is highly limited. Therefore, maximizing the object detector's ability to extract supervision from this scarce data is crucial during the fine-tuning stage. In this challenge, beyond the few-shot constraint, the cross-domain setting further increases the difficulty, as detectors usually require additional supervision to effectively adapt to a new domain. + +The core concept of the MoE architecture is to enable different components (i.e., experts) of a model to specialize in different aspects of the data [2]. In recent years, MoE has gained popularity in multi-modal models, including Mistral [25] and DeepSeek-V2 [42]. A common application of MoE in such models is replacing the traditional feedforward network (FFN) with an MoE-based variant, as seen in Switch Transformer [10] and OpenMoe [80]. + +To maximize supervision and enable the model to learn effectively from the limited training data, this team integrates a Mixture-of-Experts (MoE) mechanism into Grounding DINO during the fine-tuning stage. The MoE framework allows different experts to specialize in distinct aspects of the data, facilitating the capture of more diverse and informative representations. 
It is hypothesized that this capability helps Grounding DINO better adapt to the target domain while making more efficient use of the available training data. + +In this team's approach, the MoE mechanism is incorporated into the feed-forward network (FFN) layers of Grounding DINO's Cross-Modality Decoder. As illustrated in Figure 3, the MoE architecture consists of one shared expert and three router-selected experts. + +# 4.1.2. Training Details + +A two-stage fine-tuning pipeline is adopted to adapt Grounding DINO for cross-domain few-shot object detection. In the first stage, the standard Grounding DINO (without the MoE substitution) is fine-tuned on the training data, + +![](images/0b7da84e2775e9ddbfcbf86d3acb6b3e314368a8cf2dd3dcc8102d0f04f792a6.jpg) + +![](images/e99f2e71e525eaa1944ac9b617b933a5aa42cf899e9ed92b704bb56a99e68e67.jpg) +Figure 3. Team MoveFree: an illustration of the substitution of MoE into Grounding DINO's decoder layers. + +with all parameters trainable except for the language encoder. In the second stage, the MoE architecture is introduced into the model. + +For the second stage, the model is initialized using the weights obtained from the first stage, excluding the MoE components. The shared expert within the MoE is initialized with weights from the first stage, while the three router-selected experts are initialized using the open-source pre-trained weights of Grounding DINO. This initialization strategy facilitates effective learning from limited training data while retaining knowledge acquired during the initial stage. During this phase, only the MoE components and the detection head remain trainable, with all other parts of the model kept frozen. + +Additionally, the self-supervised learning paradigm is applied in both stages to iteratively refine the training data and enhance performance. The training strictly adheres to the provided few-shot training set, without utilizing any external data. 
The overall approach is computationally efficient and can be executed on a single V100 GPU within a reasonable time frame. + 

# 4.2. AI4EarthLab + 

# 4.2.1. Proposed Method + 

Foundation models pretrained on large-scale datasets, such as GroundingDINO [43] and LAE-DINO [51], have demonstrated strong detection performance in cross-domain zero-shot and few-shot object detection tasks. Thus, the AI4EarthLab team is motivated to explore such foundation models for CD-FSOD. + 

As shown in Fig. 4, this team proposes an augmentation-search strategy for CD-FSOD, which leverages open-source data and transfers the model to novel target domains. Following the approaches in [15, 52], an efficient fine-tuning method is adopted to explore the cross-domain few-shot detection capabilities of foundation models, requiring only lightweight tuning to identify effective subfields. Code is made available $^{3}$ . + 

![](images/43d8c11f2a43152c39a45bf877db09a41ebc256d1d670b340e4ee5df4386d35a.jpg) +
Figure 4. Team AI4EarthLab: overall framework of augmentation-search strategy Enhance Then Search (ETS) with foundation model for CD-FSOD. + 

Data augmentation has proven effective in reducing semantic confusion during few-shot fine-tuning, particularly in cases where categories—such as certain fruits—are visually and semantically similar. Through extensive few-shot experiments, it is observed that integrating image-based augmentation with optimal domain search strategies can further enhance the performance of foundation models, though their upper performance bound remains uncertain. Building upon the open-source Grounding DINO framework, several commonly used image augmentation techniques are incorporated, and specific optimization objectives are defined to efficiently search for optimal subdomains within a broad domain space. This strategy facilitates more effective few-shot object detection. 
The proposed augmentation-search strategy consists of the following steps: + +Step 1: Select the foundation model. This team adopts the Swin-B version of GroundingDINO as the foundation model, because of its best performance within the open-source model. This model has been pre-trained on a diverse set of large-scale datasets, including COCO, Objects365 (O365), GoldG, Cap4M, OpenImages, ODinW-35, + +and RefCOCO, which collectively provide strong generalization capabilities across multiple vision-language grounding tasks. + +Step 2: Build a combined image augmentation pipeline. To improve the model's adaptability to various subdomains under limited data scenarios, this team construct a composite image augmentation pipeline. This pipeline randomly applies a combination of augmentation techniques such as CachedMosaic, YOLOXHSVRandomAug, RandomFlip, CachedMixUp, RandomResize, and RandomCrop. These methods are designed to enhance sample diversity, simulate domain shifts, and improve the model's robustness during fine-tuning. Additional data augmentation techniques, such as Copy-Paste, are also evaluated. However, these methods are found to introduce greater instability during few-shot fine-tuning. + +Step 3: Construct an optimized target domain validation set. To evaluate adaptation performance, a subset of the annotated test data is sampled and used as a validation set. Rather than employing full annotations, coarse-grained labeling is applied to provide sufficient supervision for hyperparameter tuning, while significantly reducing annotation costs in the target domain. + +Step 4: Search for the best model parameters on the validation set. Hyperparameter search and model selection are conducted based on validation performance. This process involves tuning the learning rate, augmentation intensity, and other training configurations to determine the optimal setup for effective domain adaptation. + +Step 5: Perform inference on the test set. 
Once the optimal configuration is identified, the fine-tuned model is applied to the held-out test set to evaluate its final performance on the target domain. + 

# 4.2.2. Training Details + 

Experiments are conducted on eight NVIDIA A100 GPUs, executing $8 \times 50$ experiment groups per round. During training, the optimal step size is selected based on historical performance to accelerate the fine-tuning process. Learning rate schedules are adjusted using milestone epochs, typically set to 1, 5, and 9 depending on the fine-tuning setting. The model uses 900 queries by default and a maximum text token length of 256. A BERT-based text encoder with BPE tokenization is employed. Both the feature enhancer and cross-modality decoder consist of six layers, and deformable attention is adopted in the image cross-attention modules. The loss function comprises classification (or contrastive) loss, box L1 loss, and GIoU loss. Following the Grounding DINO framework, Hungarian matching weights are set to 2.0 (classification), 5.0 (L1), and 2.0 (GIoU), while the final loss weights are 1.0, 5.0, and 2.0, respectively. Although various hyperparameter configurations are also explored, their impact is found to be relatively minor compared to that of data augmentation strategies. + 

# 4.3. IDCFS + 

# 4.3.1. Proposed Method + 

The IDCFS team proposes a Pseudo-Label Driven Vision-Language Grounding method for CD-FSOD. As shown in Figure 5, the proposed method mainly combines large-scale foundation models with an iterative pseudo-labeling strategy. GLIP [35] is fine-tuned using three approaches, with full-model fine-tuning delivering the best results in most cases. To better exploit the support set, an iterative training strategy is proposed and applied, using high-confidence predictions as pseudo-labels to refine the model. Additionally, this team also fine-tunes Grounding DINO [43] with LoRA [21], efficiently modifying the attention layers while freezing the base model. 
Finally, the model ensemble with confidence-reweighted NMS is further adopted to boost accuracy. Code is made available4. + +![](images/85b77445909842aa7ec248e69a7ffa63394b4c2f44fa7b5de322b7355f4aed5c.jpg) +Figure 5. Team IDCFS: overview of the proposed Pseudo-Label Driven Vision-Language Grounding for CD-FSOD. + +Fine-tuning on GLIP. Foundation models pretrained on large-scale datasets, such as GLIP [35], have demonstrated strong performance in zero-shot and few-shot object detection tasks. The proposed method is based on the GLIP-L model, which has been pretrained on several datasets including FourODs, GoldG, CC3M+12M, and SBU. For downstream tasks, this team tried three ways to fine-tune GLIP: 1) Full Model Fine-Tuning: fine-tune all parameters of the GLIP-L model using a relatively small learning rate $(\mathrm{lr} = 2\mathrm{e} - 5)$ . 2) Prompt Tuning V1: fine-tune only the parameters of the text branch. 3) Prompt Tuning V2: This mode performs traditional prompt tuning by applying a linear layer to map the extracted text features. Experiments show that Full Model Fine-Tuning generally achieves the best fine-tuning performance in most cases. + +Iterative Training. Given the scarcity, high cost, and limited availability of annotated data in few-shot learning scenarios, this team also designed an iterative training approach to train the model, as shown in Figure 6. Specifically, the proposed method first fine-tunes the model for + +a few steps using the available labeled data. Then, the fine-tuned model is used to predict the support set samples, selecting the predictions with high confidence as pseudolabels to update the label information of the support set samples. The model is then fine-tuned again. By iterating this process, the proposed method fully utilizes the information in the support set samples, achieving better performance while ensuring the robustness of the model, making it less susceptible to the influence of low-quality labels. 
+ 

![](images/e1bae98ebc90a5af43497e591c20abbf6d9c63d8ec25b909a6702559d0ae8005.jpg) +
Figure 6. Team IDCFS: overview of the iterative training process. + 

Fine-tuning Grounding DINO with LoRA. The IDCFS team also uses Grounding DINO [43] as another foundation model to generate bounding boxes and classification probabilities. LoRA [21] is used to fine-tune GroundingDINO on the few-shot training set. Specifically, this team adds bypass adapters to the linear projection layers (i.e., query, key, and value) of the attention mechanism in the visual backbone and BERT of Grounding DINO. To facilitate better adaptation to cross-domain datasets, the original model weights are frozen, and only the newly added parameters are trained. + 

Model Ensemble. To effectively combine the outputs of GLIP and Grounding DINO, a model ensemble strategy with confidence reweighting is employed. Specifically, the detection scores from each model are scaled by predefined reliability weights. The reweighted predictions are then merged and refined using Non-Maximum Suppression (NMS) [47] to eliminate redundant bounding boxes and produce the final fused results. This approach allows the more reliable model to have a greater influence on the final predictions, enhancing detection performance by leveraging the complementary strengths of both models. + 

# 4.3.2. Training Details + 

For GLIP fine-tuning, the GLIP-L variant is used, which incorporates Swin-L [46] as the visual encoder and BERT [7] as the text encoder. The model is pre-trained on a variety of datasets, including FourODs [29-31], GoldG [27], CC3M+12M, and SBU [49]. During fine-tuning, full-model training is applied with a reduced learning rate of 2e-5, compared to the original setting of 1e-4 in GLIP. For Grounding DINO, the Swin-B [46] backbone is used as the visual encoder and BERT from Hugging Face [78] as the text encoder. 
The model is pre-trained on COCO [41], Objects365 [65], GoldG [27], Cap4M, OpenImages [31], ODinW-35 [33], and RefCOCO [27]. For the 1-shot and 5-shot settings on the CARPK dataset [20], no fine-tuning is performed. For 1-shot training on DeepFruits [60], only the backbone is fine-tuned using LoRA. In all other cases, LoRA is used to fine-tune both the backbone and the BERT text encoder. + +# 4.4. FDUROILab_Lenovo + +# 4.4.1. Proposed Method + +Efficient Tuning. To enhance the model's adaptability in cross-domain few-shot detection (CDFSOD), this team proposes an efficient fine-tuning strategy. The proposed approach leverages data augmentation techniques to expand the training set and improve the model's ability to recognize objects in the target domain with proposed k-shot annotated samples. + +Specifically, given a k-shot setting, where $\mathbf{k}$ represents the number of provided object samples, the proposed approach adopts a structured fine-tuning pipeline, which is shown in Figure 7. + +![](images/9f7c58c6153c5492de9247cf262c01c36f6cfc8b799078979c8b237f362f2ad7.jpg) +Figure 7. Team FDUROILab_Lenovo: overview of the efficient tuning and inference. + +(1) Object Cropping and Augmentation. Using the provided bounding boxes of k-shot examples, the proposed method first crops the target objects from the original images. The cropped objects are then subjected to various data augmentation techniques, including flipping, rotation, grayscale conversion, and other transformations, to introduce diversity and improve generalization. (2) Object Rescaling and Random Pasting. The proposed method randomly rescales the augmented objects to different sizes and pastes these transformed objects to the original images at different locations. This step simulates new object placements and enhances the model's robustness to variations in object appearance and context. (3) Fine-Tuning with Augmented Data. 
The proposed method finetunes the open-vocabulary detection model with the augmented images. This enables the detector to better adapt to objects in the target domain, even with minimal labeled examples. Additionally, the augmented data effectively increases the number of + +training samples, mitigating the few-shot learning limitation and improving overall detection performance. Through this efficient fine-tuning approach, the finetuned model gains enhanced adaptability to new domains while maintaining the advantages of open-vocabulary detection. + +Inference. Since the proposed approach is based on an open-vocabulary detection model, it requires access to the target category labels during inference, which is shown in Figure 7. To obtain these labels, this team utilizes Qwen2.5-VL [1] to generate the textual descriptions of the target categories. The retrieved target labels from Qwen2.5-VL are used as textual input to guide the detection process. Then, the open-vocabulary detection model [11] is used to identify and classify objects in the test image based on the provided text-based labels. + +![](images/c281bd39de4691807c301ba17b6d49273f8d7a7767a20c78259bbb45b9f42084.jpg) +Figure 8. Team FDUROILab_Lenovo: post processing. + +Post-Process. Although existing open-vocabulary detectors possess strong open-set detection capabilities, their performance on the challenge test set remains suboptimal. Upon further analysis, this team found that while the detector can successfully identify most objects, its primary weakness lies in classification errors rather than detection failures. This indicates that the open-vocabulary detection model still struggles with accurate classification when adapting to objects in a new domain. To address this issue, the Qwen2.5-VL is introduced as an auxiliary classifier to refine the final predictions, which is shown in Figure 8. 
For each test image, this team prompts Qwen2.5-VL to describe the objects present in the scene and provide a list of candidate categories that are likely to appear in the image. After that, this team refines the output of the open-vocabulary detection model using one of two strategies: (1) Filtering. Remove objects that are classified incorrectly by the detector and are not listed by Qwen2.5-VL. (2) Reclassification: Assign all detected objects to one of the categories predicted by Qwen2.5-VL, ensuring consistency between the detected bounding boxes and the high-level scene understanding of the multimodal model. The choice between these two strategies depends on the specific test dataset. By leveraging Qwen2.5-VL as a post-processing step, this team effectively corrects classification errors and enhances the + +model's performance on unseen domains, leading to more accurate and reliable object detection results. + +# 4.4.2. Training Details + +LLMDet [11] is adopted as the open-vocabulary detection model, with Swin-Large [46] serving as the visual backbone. The Qwen2.5-VL-72B [1] is introduced as the multimodal large language model (MLLM). Fine-tuning experiments are conducted on eight NVIDIA RTX 3090 GPUs, using a batch size of 8 and a learning rate of 1e-6. The number of training iterations varies across datasets and few-shot settings. For DeepFruits [60] and CarDD [76], the model is fine-tuned for 30, 50, and 100 batches under the 1-shot, 5-shot, and 10-shot settings. No fine-tuning is performed for CARPK [20]. + +To enhance classification accuracy, dataset-specific post-processing strategies are applied. For DeepFruits, all detected objects are reclassified into one of the categories predicted by Qwen2.5-VL. In the case of CarDD, detected objects not belonging to the predefined categories are filtered out. As CARPK contains only a single object category, no additional classification is performed. 
However, further filtering is applied to remove overly large bounding boxes, which are likely to be incorrect, as the objects in this dataset are generally small. In all cases, Non-Maximum Suppression (NMS) is used to eliminate redundant or overlapping predictions. + +# 4.5. HUSTLab + +# 4.5.1. Proposed Method + +The HUSTLab explores the usage of Qwen2.5VL, MM-GroundingDINO, and LLMDet for the open-source CD-FSOD. The proposed method can be divided into two distinct phases: 1) Obtaining text descriptions from the training set using the Qwen2.5VL model; 2) Selecting a base model, such as Grounding DINO or LLMDet, and fine-tuning it with CopyPaste data augmentation, followed by Adversarial Weight Perturbation (AWP) training to derive the final model and obtain test results. We observe that models like Grounding DINO possess robust object detection capabilities, and fine-tuning them with few-shot data significantly enhances detection performance in specific domains. Moreover, for training sets with limited samples, utilizing text descriptions generated by large-scale vision-language models proves highly effective. + +Text Description Generation with a Large VLM. In this phase, this team leverages Qwen2.5VL to generate detailed text descriptions for the limited samples in the training set, extracting text-modal information from the images [50]. Converting visual-modal information into text-modal information helps eliminate noise and condense semantic content. These detailed text descriptions are robust and will be fully utilized during the testing phase to enhance cross + +![](images/cda92802f910d1d9c86fcb9690a9c013f2143055bf9564f0a4664b98cfef3300.jpg) +Figure 9. Team HUSTLab: overall framework of the proposed method. + +domain few-shot object detection performance. + +![](images/02bfe67dc0273f0b6bb1570a13e9e272e3fc2bb1f70285eea045d85d22cd74f4.jpg) +Figure 10. Team HUSTLab: text description generation [50]. + +Training Phase. 
In this stage, this team first selects an appropriate base model—either Grounding DINO [43] or LLMDet [11]—based on its compatibility with the dataset. Using the zero-shot capabilities of the chosen base model, this team generates pseudo-labels, which are combined with ground-truth labels during training to regularize the model under few-shot conditions. To fine-tune the base model, this team uses CopyPaste [17] data augmentation and Adversarial Weight Perturbation (AWP) techniques [79]. This approach strengthens the model's generalization and robustness, enabling it to effectively handle cross-domain few-shot object detection tasks. + 

# 4.5.2. Training Details + 

The model is fine-tuned on three datasets using the MM-GroundingDINO-Large implementation provided by MMDetection as the base object detection framework, with the aim of enhancing cross-domain detection capabilities. The performance largely depends on prompt design. Since part of the BERT-based text encoder is kept frozen during training, prompt quality plays a crucial role in boosting performance for certain object detection tasks. Prompts generated using Qwen2.5-VL are able to accurately describe the attribute features associated with abstract category names, thereby assisting the model in object localization and recognition. All experiments are conducted on $4 \times$ NVIDIA RTX 3090 GPUs. + 

# 4.6. TongjiLab + 

# 4.6.1. Proposed Method + 

The TongjiLab proposes ProtoDINO, an innovative approach for CD-FSOD under the open-set setting, building upon GroundingDINO [43] as the baseline model. To improve the target classification performance of the baseline model, the CLIP model [22, 54] is employed to extract both local and global image features from a limited set of target domain samples. These features are subsequently used to construct support sets, which serve as the foundation for building local prototype and global prototype networks, respectively. 
In addition, a text prototype network is developed using the CLIP model. During the target detection phase, visual features are extracted from each image query using CLIP. The L2 distances between these visual features and the local prototypes, global prototypes, and text prototypes are then computed, with these distances serving as one of the metrics for target classification. Furthermore, a car-damage-detection model5, implemented as a vehicle appearance damage classification model based on the Vision Transformer (ViT), is incorporated. For the final target classification, matching probabilities derived from the GroundingDINO model, the car-damage-detection model, and the prototype networks [66] are weighted and combined to produce the overall classification metric. + +The framework of the proposed ProtoDINO is depicted in Fig. 11. Overall, ProtoDINO operates in two key stages: prototype construction and target detection. + +![](images/8e3b6e9faa6f1d53069bf24196c367f48a6404b2c370382e50a39429b69ee961.jpg) +Figure 11. Team TongjiLab: framework of the proposed ProtoDINO. + +Prototype Construction. During the prototype construction phase, this team crops few-shot learning images based on their annotations and generates visual embeddings as local feature prototypes $c_{local}$ for these local patches using the CLIP model. For 5-shot and 10-shot settings, $c_{local}$ is computed as the mean of all visual embeddings within the same category. Similarly, global feature prototypes $c_{global}$ are derived by encoding entire images through CLIP and applying the same averaging strategy across categories. For each category text $t$ , this team builds the text prototype $c_{text}$ using CLIP as the text encoder. 
+ +$$ +c _ {l o c a l} ^ {(n)} = \frac {1}{n} \sum_ {i = 1} ^ {n} F _ {c r o p} ^ {(i)} \tag {1} +$$ + +$$ +c _ {g l o b a l} ^ {(n)} = \frac {1}{n} \sum_ {i = 1} ^ {n} F _ {i} ^ {(i)} \tag {2} +$$ + +$$ +c _ {t e x t} ^ {(n)} = f _ {\text {c l i p - t e x t}} \left(t ^ {(n)}\right) \tag {3} +$$ + +Target Detection. In the target detection stage, the input image and target category texts are processed by GroundingDINO to generate bounding boxes and initial classification probabilities. These bounding boxes are used to crop local regions from the image, which are then encoded by CLIP to obtain their visual features $F_{crop}$ . To classify these regions, this team computes the L2 distances between their representations and the precomputed prototypes as in Eq. 4. These distances are transformed into probability distributions via a softmax operation, yielding the prototype network's classification output as in Eq. 5. Simultaneously, the cropped regions are evaluated by a pre-trained car-damage-detection model (based on Vision Transformer) to generate additional classification probabilities. The final classification decision is derived by aggregating probabilities from GroundingDINO, the car-damage-detection model, and the prototype network through a weighted summation as in Eq. 6. This fusion approach effectively integrates geometric localization from GroundingDINO, cross-modal semantics from CLIP, domain-specific insights from the car-damage-detection model, and few-shot prototype matching. + +$$ +d (u, v) = \sqrt {\sum_ {n} \left(u ^ {n} - v ^ {n}\right) ^ {2}} \tag {4} +$$ + +$$ +p r o b s _ {p r o t o} = - \frac {1}{\sigma} \cdot e ^ {N o r m [ d (F, c) ]} \tag {5} +$$ + +$$ +p r o b s = \sum_ {i} w _ {i} \cdot p r o b s _ {i} \tag {6} +$$ + +# 4.6.2. Training Details + +The implementation is carried out on a server running CentOS 7, equipped with a single RTX 6000 Ada GPU. 
For the CLIP model, the DFN5B-CLIP-ViT-H-14-378 implementation is selected due to its balance between performance and efficiency in processing visual and textual data. For the GroundingDINO model, the official implementation is used. Based on empirical observations, the threshold parameter $\sigma$ is set to 0.5, which provides optimal results across various scenarios. In GroundingDINO, the bounding box confidence threshold (BOX_THRESHOLD) is set to 0.3. For the final decision fusion, the weighting coefficients for integrating outputs from multiple modules are empirically assigned as: $w_{\mathrm{local}} = 0.25$ (local prototype network), $w_{\mathrm{global}} = 0.15$ (global prototype network), $w_{\mathrm{text}} = 0.4$ (text + +prototype network), $w_{\mathrm{dino}} = 0.1$ (GroundingDINO), and $w_{\mathrm{car}} = 0.1$ (car-damage-detection model). + +# 4.7. Manifold + +# 4.7.1. Proposed Method + +To address the challenge of few-shot object detection in cross-domain scenarios, the Manifold team proposes a novel approach based on the detection pipeline of a two-stage object detection algorithm. As illustrated in the Figure. 12, the proposed method first employs an open set object detection network, which is trained on public datasets, to detect objects in the query image. However, due to the domain gap between the pretraining datasets and the query datasets, the detection results cannot be directly trusted. Therefore, this team treats these results as region proposals that may contain objects of interest. Subsequently, this team combines the instance features from the support set for classification to obtain the final detection results. + +![](images/e2f28966d29b1d9309d4b9cb28111ee3b3de5a35e827252d153c528d74e5800c.jpg) +Figure 12. Team Manifold: overall framework of GDPRE. + +GroundingDINO-based Region Proposals. The GroundingDINO is selected as the pre-trained open-set object detector. 
It can detect objects of interest in images using input text, and it was pre-trained on seven datasets: COCO, O365, GoldG, Cap4M, OpenImage, ODinW-35, and RefCOCO. This pre-training gives it good detection capabilities for most real-world objects. However, in cross-domain few-shot scenarios, its detection effectiveness is suboptimal. For example, avocados may be misclassified as oranges because of the higher frequency of oranges in the pre-training data. Despite this, GroundingDINO can still provide region proposals for potential objects of interest in query images. + +ResNet-based Feature Classification. After obtaining region proposals, this team classifies the objects within them using support set images. Given the limited samples and significant intra-class variations in image space, directly matching support instances with query candidates in this space yields poor results. ResNet pre-trained on ImageNet is used to extract image features, mapping instances to a more robust feature space. To address scale differences, this team resize instances in both support and region proposals images to $256 \times 256$ for feature extraction. Considering + +some classes have large intra-class and small inter-class differences, this team treats each instance's feature vector in multi-shot settings as a separate support vector rather than averaging them by class. This team calculates the cosine similarity between candidate region instances and support set instance feature vectors, assigning the region proposal instance to the class of the most similar support instance. This yields the final detection results, and the cosine similarity serves as the prediction confidence. + +# 4.7.2. Implementation Details + +Given that both GroundingDINO and ResNet are pretrained on large-scale datasets, fine-tuning them under few-shot constraints—where the training classes do not overlap with the test classes—can be challenging. As a result, the pre-trained model weights are kept frozen. 
This approach requires minimal computational resources and can be executed on a laptop equipped with an RTX 4060 GPU. During inference, the category names from the test dataset are used as prompt inputs for GroundingDINO, and the BOX_THRESHOLD is set to 0.1 to obtain the final detection results. + +# 4.8.MXT + +# 4.8.1. Proposed Method + +This team proposes a Domain Adaptation Enhancement Module (DAEM) for Cross-Domain Few-Shot Object Detection (CD-FSOD), built as an extension to the CD-ViTO framework. While CD-ViTO provides a strong foundation for open-set cross-domain detection with DinoV2 ViT-L backbone, it still faces challenges with significant domain shifts. As illustrated in Fig 13, the DAEM integrates seamlessly with the DinoV2 ViT-L backbone and enhances domain adaptation through two complementary mechanisms: batch enhancement and feature alignment. + +![](images/01810eb83ce91b149e958f580b0174e2285a8dc0d030ed2e1c8e83e917133bfe.jpg) +Figure 13. Team DAEM: overall of the proposed model. + +Batch Enhancement Module. The batch enhancement module increases training diversity through controlled style transfer between domains. For both source and target do + +main images, this team introduces cross-domain characteristics while preserving semantic content: + +$$ +\operatorname {i m g} _ {\text {s t y l e d}} = \sigma_ {t} \cdot \frac {\operatorname {i m g} - \mu_ {s}}{\sigma_ {s}} + \mu_ {t} \tag {7} +$$ + +where $\mu_s, \sigma_s$ are source image statistics and $\mu_t, \sigma_t$ are target domain statistics. The enhancement strength $\alpha$ gradually increases during training as follows: + +$$ +\alpha = \min (1. 0, \frac {t}{T _ {\text {w a r m u p}}}) \tag {8} +$$ + +where $t$ is the current iteration and $T_{warmup}$ is set to 500. This gradual adaptation prevents disrupting the pre-trained DinoV2 ViT-L features early in training. + +Feature Alignment Module. 
The feature alignment module employs two complementary strategies to reduce domain gaps: Maximum Mean Discrepancy (MMD) and style-based adaptation. + +MMD Loss: The Maximum Mean Discrepancy is applied to reduce distribution differences between features from the source and target domains. MMD measures the distance between feature distributions in a reproducing kernel Hilbert space: + +$$ +\mathcal {L} _ {M M D} \left(\mathbf {X} _ {s}, \mathbf {X} _ {t}\right) = \left\| \frac {1}{n _ {s}} \sum_ {i = 1} ^ {n _ {s}} \phi \left(\mathbf {x} _ {s} ^ {i}\right) - \frac {1}{n _ {t}} \sum_ {j = 1} ^ {n _ {t}} \phi \left(\mathbf {x} _ {t} ^ {j}\right) \right\| _ {\mathcal {H}} ^ {2} \tag {9} +$$ + +This is implemented with multiple Gaussian kernels with bandwidths $\sigma \in \{0.5, 1.0, 2.0, 5.0\}$ to capture similarities at different feature scales. This approach guides DinoV2 ViT-L to preserve its powerful representation abilities while adapting to target domains with minimal samples. + +Style Loss: Style-based adaptation addresses visual variations between domains that are unrelated to object semantics. For feature maps $\mathbf{F}$ , the channel-wise statistics is transformed as: + +$$ +\hat {\mathbf {F}} = \sigma_ {t} \cdot \frac {\mathbf {F} - \mu_ {s}}{\sigma_ {s}} + \mu_ {t} \tag {10} +$$ + +where $\mu_s, \sigma_s$ and $\mu_t, \sigma_t$ are the channel statistics of source and target features. This approach helps Di-noV2 ViT-L focus on domain-invariant object characteristics rather than domain-specific visual styles. + +The overall training objective combines the original CDViTO detection loss with the proposed domain adaptation components: + +$$ +\mathcal {L} = \mathcal {L} _ {\text {d e t}} + \lambda_ {m m d} \mathcal {L} _ {M M D} + \lambda_ {\text {s t y l e}} \mathcal {L} _ {\text {s t y l e}} \tag {11} +$$ + +# 4.8.2. 
Training Details + +Following the pretrain–finetune–test pipeline established in the CD-FSOD benchmark, the pretrained DinoV2 ViT-L backbone from CD-ViTO is utilized. During fine-tuning, the backbone and Region Proposal Network (RPN) are selectively frozen, while the Domain-Adaptive Enhancement Modules (DAEM) and ROI Heads are optimized. This strategy preserves the general representational power of DinoV2 ViT-L while allowing domain-specific components to adapt effectively. + +Training is conducted on NVIDIA A800 GPUs, with hyperparameters determined through extensive experimentation: the MMD loss weight is set to $\lambda_{mmd} = 0.16$ , the style loss weight to $\lambda_{style} = 0.12$ , and the batch enhancement strength to $\alpha_{max} = 0.8$ . Differential learning rates are applied, using a multiplier of 2.0 for the DAEM modules and bias terms, with a base learning rate of $1 \times 10^{-4}$ . + +A warm-up phase of 500 iterations is introduced to gradually increase adaptation strength. This helps stabilize early-stage training and prevents disruption of the pretrained DinoV2 ViT-L features. Optimization is performed using stochastic gradient descent (SGD) with a momentum of 0.9 and a weight decay of $1 \times 10^{-4}$ . The model reaches optimal cross-domain performance after approximately 50 epochs. The proposed approach maintains the efficiency of CD-ViTO while delivering substantial improvements in challenging cross-domain few-shot detection scenarios. + +# 5. Special Closed-Source Track Methods + +# 5.1. X-Few + +# 5.1.1. Proposed Method + +To address the challenges of domain shift and category confusion arising from limited annotated data in CD-FSOD, the X-Few team proposes a novel domain adaptation strategy based on the Instance Feature Caching (IFC) mechanism. The framework of the proposed method is shown in Fig. 14, which is mainly built upon the CD-ViTO baseline. Code is made available $^{6}$ . 
+ +Intuitively, the IFC module is proposed to construct a cache model that could store and dynamically retrieve discriminative instance-level features from the target domain, alleviating model degradation caused by cross-domain distribution discrepancy in the few-shot supervision situation. Specifically, the IFC mechanism facilitates knowledge transfer through prototype-based feature alignment and an attention-guided memory update strategy, enhancing the model's generalization capability in the data-scarce cross-domain scenario. + +Instance Feature Caching Construction. Given a support set $S$ comprising $N$ target categories, each consisting + +![](images/3a8a5a344c8d7a48b7252170d1c6170ca1b9ad7c65a3ecb9a38a78a25b908a37.jpg) +Figure 14. Team X-Few: illustration of the proposed Instance Feature Caching (IFC). + +of $K$ annotated instances, denoted as $I_{K}$ with their associated labels $L_{N}$ . For all $N \times K$ support samples, the proposed method leverages a pre-trained DINOv2 ViT $f_{CM}$ to obtain the instance-level features $F_{train} \in \mathbf{R}^{NK \times C}$ . Similarly, the ground-truth labels are also encoded into $N$ -dimensional one-hot vectors $L_{train} \in \mathbf{R}^{NK \times N}$ : + +$$ +F _ {t r a i n} = \mathbf {f} _ {C M} \left(I _ {K}\right) \tag {12} +$$ + +$$ +L _ {\text {t r a i n}} = \mathbf {O n e H o t} \left(L _ {N}\right) \tag {13} +$$ + +The feature extraction step is performed in an offline fashion to ensure persistent storage of high-quality feature representations for support set instances, thereby preserving discriminative semantic characteristics and spatial-aware contextual patterns in a memory-efficient manner. Then, these features and their corresponding label encodings are systematically cached to establish a comprehensive knowledge base that facilitates adaptive domain-aware detection while mitigating catastrophic forgetting. + +Instance Search. 
After constructing the instance feature caching, given a query image $\mathcal{I}$ , the proposed method first feeds $\mathcal{I}$ into both the Region Proposal Network and the Vision Transformer encoder to generate candidate regions and extract their deep features, respectively. These region proposals are then combined with the corresponding instance-level features in $\mathcal{I}$ to derive a query vector $f_{test}$ for each candidate bounding box. Then, the proposed method achieves the most relevant instance feature lookup and finally calculates the adaptation representation $A \times L_{train}$ for the target domain, where $\mathbf{A} \in \mathbf{R}^{NK}$ is the affinity matrix between query vector and instance feature caching, being defined as: + +$$ +\mathbf {A} = \exp (- \beta (1 - f _ {\text {t e s t}} F _ {\text {t r a i n}} ^ {T})) \tag {14} +$$ + +Ultimately, the domain adaptation representation is fed into the classification and regression branches of the original detection framework to calibrate prediction results from the open-set detector: + +1. Classification Enhancement: The similarity distribution between query features and cached features is leveraged to refine confidence estimates for the target domain categories through contrastive alignment. + +2. Localization Refinement: Retrieved instance localization priors are incorporated to constrain bounding box regression, thereby mitigating cross-domain localization biases caused by domain shifts. + +The above two strategies ensure that the detector adaptively aligns domain-invariant semantic representations while suppressing spurious correlations introduced by cross-domain discrepancies. + +# 5.1.2. Training Details + +A single RTX A800 GPU is used for the experiments. The model is pre-trained on COCO and fine-tuned on novel support images. For the DeepFruit[60], Carpk[20], and CarDD[76], the specific hyper-parameter settings are shown in Tab. 2. 
The tailored combination of learning rates and epoch schedules reflects a fine-grained tuning strategy to address domain heterogeneity across datasets, ensuring optimal trade-offs between generalization and task-specific optimization. + +Table 2. Team X-Few: the hyper-parameters settings. + +
hyperparameter/shotDeepFruit [60]Carpk [20]CarDD [76]
151015101510
Batch size161616161616161616
Initial lr1e-31e-31e-31e-41e-41e-41e-31e-31e-3
Epoch40100200408010040100200
+ +# 5.2. MM + +# 5.2.1. Proposed Method + +The MM team proposes a novel DFE-ViT method for CD-FSOD, in the closed set setting, which only takes COCO as the source data and transfers the model to a novel target. As in Fig. 15, the proposed DFE-ViT method is built upon one open-set detector (DE-ViT) and finetuned using a few labeled instances from the target domain. New improvements include Instance Feature Enhancement, ROI Feature Enhancement. + +![](images/0ccac6ed1ed6b29bebe32679af7dcabc465e116a0d16d870635d362ad7bd1b03.jpg) +Figure 15. Team MM: overall framework of the DFE-ViT. + +Specifically, given $S$ and $q$ as input, DFE-ViT follows a similar pipeline as DE-ViT to obtain instance features $F_{ins}$ , region proposals $R_{q}$ , visual features $F_{q}$ , and ROI features + +$F_{q_{roi}}$ . However, different from directly using $F_{ins}$ to derive the class prototypes, an Instance Feature Enhancement module (IFE) and an ROI Feature Enhancement module (RFE) are proposed to enhance feature representation from both instance-level and ROI-level perspectives. + +The IFE module adopts a residual CBAM structure to refine $F_{ins}^{ob}$ , enabling the network to adaptively emphasize informative channels and spatial regions. To guide this attention process more explicitly, a dedicated CBAM loss $\mathcal{L}_{cbam}$ is designed, which encourages the enhanced instance features to align with salient regions in both spatial and channel dimensions. Furthermore, to enhance semantic alignment, a class prototype enhancement mechanism is further incorporated where each object instance interacts with its corresponding class prototype via cross-attention, ensuring more discriminative and category-aware features. The output of IFE is optimized jointly with the standard detection losses, including the localization loss $\mathcal{L}_{loc}$ , classification loss $\mathcal{L}_{cls}$ , and the attention-guided loss $\mathcal{L}_{cbam}$ . 
+ +For ROI features, this team introduces RFE based on a Variational Autoencoder (VAE). Each ROI feature $F_{q_{roi}}$ is encoded into a latent distribution and then reconstructed, which enables learning a more robust and expressive representation. A reconstruction loss $\mathcal{L}_{vae}$ is employed to ensure fidelity and consistency in the learned latent space. This ROI-level enhancement complements the instance-level refinement, offering a more diversified and generalized feature representation. + +The top modules including the detection head $M_{DET}$ and the classification head $M_{CLS}$ are fine-tuned using the combined objective: + +$$ +\mathcal {L} = \mathcal {L} _ {l o c} + \mathcal {L} _ {c l s} + \alpha * \mathcal {L} _ {c b a m} + \beta * \mathcal {L} _ {v a e}. \tag {15} +$$ + +Instance Feature Enhancement. The IFE module aims to refine instance features by integrating spatial/channel attention and semantic guidance. Given input instance features $F_{ins} \in \mathbb{R}^{B \times C \times H \times W}$ , it first applies a residual CBAM to obtain spatially and channel-refined features $F_{cbam}$ . Then, class prototypes $P \in \mathbb{R}^{N \times C}$ are used to semantically enhance the instance features via a cross-attention mechanism. Specifically, query and key projections are computed as $Q = W_qF_{ins}$ and $K = W_kP$ , followed by attention: $A = \text{softmax}(QK^\top / \sqrt{d})$ . The attended prototype features are added with a learnable weight $\gamma$ , yielding $F_{proto}$ . The final enhanced features are computed as $F_{enh} = F_{cbam} + F_{proto}$ , which are more discriminative for downstream detection. + +ROI Feature Enhancement. The RFE module is based on a Variational Autoencoder and class prototype computation. As shown in Fig. 15, the orange modules represent the newly proposed contributions: using VAE to model ROI features and enriching them with class prototypes. 
Given input ROI features $x \in \mathbb{R}^{N \times C \times k \times k}$ , VAE + +encodes $x$ into latent mean $\mu \in \mathbb{R}^{N \times d}$ and log-variance $\log \sigma^2 \in \mathbb{R}^{N \times d}$ through linear layers. Latent variables are sampled as $z = \mu + \sigma \odot \epsilon$ using the reparameterization trick. Then, $z$ is decoded to reconstruct the ROI features $\hat{x} = \mathrm{Decoder}(z)$ . The reconstruction loss is computed as $L_{\text{recon}} = \frac{1}{N} \sum_{i=1}^{N} \| \hat{x}_i - x_i \|^2$ , and the KL divergence loss regularizes the latent distribution: $L_{KL} = -\frac{1}{2} \sum_{i=1}^{N} (1 + \log \sigma_i^2 - \mu_i^2 - \sigma_i^2)$ . The total VAE loss is $L_{vae} = L_{\text{recon}} + L_{KL}$ . Finally, class prototypes are computed to further enhance feature representation across categories. + +# 5.2.2. Training Details + +The model is trained in the "pretrain, finetune, and test" pipeline. Specifically, the base DE-ViT model pretrained on COCO is taken, then the $M_{DET}$ , $M_{CLS}$ , $IFE$ and $RFE$ are tuned on novel support images $S$ using the loss as in Eq. 15. The hyperparameter $\alpha$ temperature for $\mathcal{L}_{cbam}$ , $\beta$ temperature for $\mathcal{L}_{vae}$ are set as 0.3, 0.4 for all the target datasets. While the value $N_{dom}$ means the number of virtual domains depending on the number of target classes $N$ , specifically, $N_{dom} = 2 * N$ . The hyperparameter Top-K ( $K$ ) in DE-ViT is set to 5. For datasets with the number of classes $N$ less than 5, $K$ is set to $N$ . The trainable parameters are finetuned on 1-shot around 80 epochs, and on 5/10-shot around 50 epochs. The SGD with a learning rate of 0.002 is used as the optimizer. Experiments are performed on four A6000 GPUs. + +# 5.3. FSV + +# 5.3.1. 
Proposed Method + +The FSV team proposes an enhancement to the prototype-based detection for the cross-domain few-shot object detection (CD-FSOD) challenge under the closed-source setting, based on the CD-ViTO baseline model, as shown in Figure 16. Based on observations of the existing approach, this team found that CD-FSOD faces three key challenges. First, few-shot learning inherently suffers from limited example diversity. Second, conventional binary masking treats all spatial locations within an object region equally, which fails to prioritize more discriminative central regions over potentially noisy boundary areas. Third, standard cosine similarity calculations between query features and prototypes lack proper calibration, resulting in suboptimal separability across domain shifts. To solve these three challenges, this team explores three techniques: (1) Support Set Data Augmentation, (2) Soft Mask-Based Prototype Aggregation, and (3) Temperature-Scaled Similarity Calibration. + +Support Set Data Augmentation. For the support images, the proposed approach constructs a stochastic augmentation function to increase the diversity of the samples. DINOv2 [48] is used as the feature extraction backbone for the augmented data, for its robust self-supervised learning capa + +![](images/8e0a5ce6cbe5f1bc98968e9128663375e2bf4d8e17311e0087fd301456ee1c0b.jpg) +Figure 16. Team FSV: overview of the proposed method. + +bilities and effective cross-domain transfer. The augmentation pipeline consists of a composition of transformations including Random Saturation, Random Contrast, Random Brightness, Random Flip, Random Rotation, Random Crop, Random Erasing, and Resize Shortest Edge. + +Soft Mask-Based Prototype Aggregation. To prioritize more discriminative central regions over potentially noisy boundary areas, the conventional binary masks are replaced by Gaussian soft masks to create soft spatial attention. 
Let $F_{ins} = \{F_{ins}^{ob}, F_{ins}^{bg}\}$ denote the extracted instance features and $M$ denote the binary mask of an instance. The soft mask could be defined $\tilde{M}$ as: $\tilde{M} = \frac{G_{\sigma}(M)}{\max G_{\sigma}(M)}$ , where $G_{\sigma}$ is the Gaussian filter with standard deviation parameter $\sigma$ . The extracted instance features for foreground objects $F_{ins}^{ob}$ are then weighted by the soft mask $\tilde{M}$ , used as the initialization for learnable instance features. + +Temperature-Scaled Similarity Calibration. Finally, to calibrate image features to other domains, the proposed approach takes temperature scaling to make the final prototypes better match those in the new domain, which is a simple yet effective strategy to improve the discriminability of similarity scores. Let $F_{q_{roi}}$ denote the ROI features extracted from a query image using DINOv2. $F_{pro}$ denotes the prototype vector. The temperature scaling is applied during the cosine similarity computation as + +$$ +s _ {\tau} = \frac {F _ {q _ {r o i}} ^ {\top} F _ {p r o}}{\tau \cdot \| F _ {q _ {r o i}} \| \cdot \| F _ {p r o} \|}, \tag {16} +$$ + +where $\tau$ is a temperature parameter that controls the sharpness of the similarity distribution. By tuning the temperature parameter, the entropy of the output distribution can be better modulated. + +# 5.3.2. Implementation Details + +The training procedure utilizes only the provided few-shot datasets (1-shot, 5-shot, and 10-shot variants), without incorporating additional external data. The trainable parameters are finetuned for each testing dataset around 100 epochs. The training batch size is 16, with a base learning rate of 0.002. The parameter $\sigma$ in Soft Mask-Based Prototype Aggregation is set to 2.0. The parameter $\tau$ in Temperature-Scaled Similarity Calibration is set to 0.07. + +Experiments are performed on four NVIDIA A100 GPUs. + +# 5.4. IPC + +# 5.4.1. 
Proposed Method + +The IPC team utilizes CD-ViTO as the baseline, which is an improved version of the DE-ViT method, designed to enhance the cross-domain detection capability. To further mitigate performance degradation caused by cross-domain discrepancies and a very small number of test domain reference examples, this team was inspired by [59] to introduce a test-time adaptation algorithm during the inference phase. + +![](images/3fc263e3ffbb96207506aa0a9167656cd36623edb2810e5480bf08198e1c4a2a.jpg) +Figure 17. Team IPC: overview of the proposed approach. The upper section represents the baseline CD-ViTO fine-tuning phase; the lower section represents the test-time adaptation (TTA) process. The TTA procedure operates without access to the original training data, updating the fine-tuned detector on a single testing image before making a prediction. Crucially, only the mask prediction module in CD-ViTO undergoes gradient updates during TTA iterations. + +![](images/8c8332ca03b01b9b660825506ac66cda3019c014dd3bbd75b026f0da59b8569c.jpg) +Figure 18. Team IPC: by iteratively retaining proposals (yellow boxes $\square$ ) with high confidence scores as pseudo labels (red boxes $\square$ ), the model can effectively filter out most invalid detection boxes. + +To be specific, the proposed approach employs an iterative process as shown in Fig 17. During each iteration $t$ (where $t \in \{1, \dots, T\}$ ), the existing detector $\theta_{t-1}$ generates predictions $D_t = \{(b_{t,i}, p_{t,i}) : \forall i\}$ for image $I$ , with $b_{t,i}$ representing the $i^{th}$ object's bounding box and $p_{t,i} \in [0,1]^K$ denoting the class probability distribution across $K$ categories. The detection confidence $c_{t,i} \in [0,1]$ is determined by the highest probability in $p_{t,i}$ , while the + +corresponding class index gives the predicted object category $y_{t,i} \in \{1, \dots, K\}$ . 
Confident detections are then selected as pseudo-labels as illustrated in Fig 18: $P_t = \{(b_{t,i}, y_{t,i}) : c_{t,i} > \lambda_{conf}\}$ , with $\lambda_{conf}$ serving as the confidence cutoff. The detector is subsequently refined through gradient descent on these pseudo-labels, yielding an improved model $\theta_t$ . + +For the initial iteration $(t = 1)$ , the detector $\theta_{t - 1}$ is initialized as $\theta_0$ , which was pre-trained on source domain data. Upon completion of the final iteration $(t = T)$ , the optimized model $\theta_T$ produces the final predictions for $I$ . Notably, this self-training paradigm maintains the original network architecture and operates without requiring access to source data or any other pretrained foundation models during adaptation. + +# 5.4.2. Training Details + +A single NVIDIA A6000 GPU is used for all experiments. The proposed method extends the CD-ViTO baseline through a test-time adaptation pipeline, initialized with k-shot instance fine-tuning on novel support datasets. During inference, the proposed method processes each test image using momentum SGD ( $\beta = 0.9$ , $\alpha = 0.001$ ) to exclusively update the mask prediction module through 5 iterations. For all experimental datasets, the cut-off confidence threshold $\lambda_{conf}$ is empirically set to 0.6. + +# 5.5.LJY + +# 5.5.1. Proposed Method + +As shown in Fig. 19, the LJY team proposes similarity calibrated prototype refinement network, which utilizes query-aware guidelines to generate prototypes. The network contains a pretrained DINOv2 ViT, a region proposal network, an ROI align module, a detection head, and a one-vs-rest classification head. During the finetuning stage, the parameters of DINOv2 ViT are frozen. Only the parameters of the detection head and the classification head are finetuned. + +![](images/a08ef0f9dc4809732e94f6a54a4e9fb6edf8cb2fe27dba8bc5508c95462c3ad5.jpg) +Figure 19. Team LJY: overall framework of SCPR. 
+ +Given a query image $\pmb{q} \in \mathbb{R}^{H \times W \times C}$ and a set of support images $S$ , where $H, W$ and $C$ stand for the height, width and number of channels, respectively, the DINOv2 ViT backbone is used for obtaining query patches $\pmb{F}_{q} \in \mathbb{R}^{d}$ and support patches $\pmb{F}_{s}$ . Then, two linear layers are applied to project the query patches $\pmb{F}_{q}$ to $\pmb{Q}$ and $\pmb{K}_{1}$ and project the support patches $\pmb{F}_{s}$ to $\pmb{K}_{2}$ . The query patches $\pmb{F}_{q}$ and the support patches $\pmb{F}_{s}$ are then concatenated to obtain $\pmb{F}_{cat} = \text{Concat}(\pmb{F}_{q}, \pmb{F}_{s})$ . The concatenated patches $\pmb{F}_{cat}$ are projected to obtain $\pmb{V}$ . To align the query patches and the support patches, the proposed method conducts scaled dot product on query patches $\pmb{F}_{q}$ and itself to obtain self attention score $A_{self} = \frac{\pmb{Q}\pmb{K}_{1}^{\top}}{\sqrt{d}}$ . Meanwhile, cross-attention score is computed using cosine similarity to ensure scale invariance $A_{cross} = \frac{\pmb{Q}\pmb{K}_{2}^{\top}}{\|\pmb{Q}\|_{2}\|\pmb{K}_{2}\|_{2} + \epsilon}$ where $\epsilon$ is a small constant to avoid division by zero. The combined attention score is obtained by concatenating both and then normalized by the softmax operation $A = \text{Softmax}(\text{Concat}(\pmb{A}_{self}, \pmb{A}_{cross}))$ . The refined query representation is obtained by applying attention weights to the value matrix $\hat{\pmb{F}}_{q} = \pmb{F}_{q} + \pmb{A}\pmb{V}$ . With the aligned query patches, the proposed method then generates prototypes with query-perceptual information. To further calibrate support features, their cosine similarity with the refined query is computed: $Sim = \text{Softmax}\left(\frac{\pmb{F}_{s}\pmb{F}_{q}^{\top}}{\|\pmb{F}_{s}\|_{2}\|\pmb{F}_{q}\|_{2} + \epsilon}\right)$ . 
This similarity is used to re-weight the support representations: $\hat{\pmb{F}}_{s} = \pmb{F}_{s} + Sim*\hat{\pmb{F}}_{q}$ . A learnable weighting function is applied via a sigmoid transformation, $W = Sigmoid(FC(\hat{\pmb{F}}_{s}))$ , ensuring adaptive feature scaling: $\hat{\pmb{F}}_{s} = W\cdot \hat{\pmb{F}}_{s}$ . The updated support features are then averaged across the K-shot dimension to derive refined prototypes: $P = \frac{1}{K}\sum_{i=1}^{K}\hat{\pmb{F}}_{s}$ . Finally, the query-aware prototype refinement is performed using a weighted combination of the refined prototypes and the original prototypes: $\hat{\pmb{P}} = \alpha\cdot\pmb{P} + (1-\alpha)\cdot\frac{1}{K}\sum_{i=1}^{K}\pmb{F}_{s}$ . This final prototype representation retains both source-domain knowledge and query-specific adaptability, effectively enhancing cross-domain few-shot detection performance. + +# 5.5.2. Training Details + +The proposed modules are fine-tuned on novel support images, with the base DE-ViT pretrained on COCO taken as initialization. The SGD with a learning rate of 0.002 is used as the optimizer. All experiments are conducted on two RTX3090 GPUs. The mAPs for 1/5/10 shots are reported. + +# Acknowledgments + +INSAIT, Sofia University "St. Kliment Ohridski". Partially funded by the Ministry of Education and Science of Bulgaria through its support for INSAIT as part of the Bulgarian National Roadmap for Research Infrastructure. This work was partially supported by the Humboldt Foundation. We thank the NTIRE 2025 sponsors: ByteDance, Meituan, Kuaishou, and University of Würzburg (Computer Vision Lab). + +# A. Teams and affiliations + +# NTIRE 2025 team + +Title: NTIRE 2025 Challenge on Cross-Domain Few-Shot Object Detection: Methods and Results. 
+ +# Members: + +Yuqian Fu1 (yuqian.fu@insait.ai), + +Xingyu Qiu² (xyqiu24@m.fudan.edu.cn), + +Bin Ren $^{3,4}$ (bin.ren@unitn.it), + +Yanwei $\mathrm{Fu}^2$ (yanweifu@fudan.edu.cn), + +Radu Timofte $^{5}$ (radu.timofte@uni-wuerzburg.de), + +Nicu Sebe4 (niculae.sebe@unitn.it), + +Ming-Hsuan Yang $^{6}$ (mhyang@ucmerced.edu), + +Luc Van Gool1 (luc.vangool@insait.ai) + +# Affiliations: + +1 INSAIT, Sofia University St. Kliment Ohridski, Bulgaria +$^{2}$ Fudan University, China +3 University of Pisa, Italy +4 University of Trento, Italy +5 Computer Vision Lab, University of Würzburg, Germany +6 University of California at Merced, United States + +# MoveFree + +Title: Marrying MoE-powered Grounding DINO with Self-training for Cross-domain Few-shot Object Detection + +# Members: + +Kaijin Zhang $^{1}$ (zhang.kaijin1@zte.com.cn), + +Qingpeng Nong1 (nong.qingpeng@zte.com.cn), + +Xiugang Dong $^{1}$ (dong.xiugang20@zte.com.cn), + +Hong Gao $^{1}$ (gao.hong@zte.com.cn), + +Xiangsheng Zhou1 (zhou.xiangsheng@zte.com.cn) + +# Affiliations: + +1 Central R & D Institute, ZTE + +# AI4EarthLab + +Title: Enhance Then Search: An Augmentation-Search Strategy with Foundation Models for Cross-Domain Few-Shot Object Detection + +# Members: + +Jiancheng Pan1 (jiancheng.pan_plus@gmail.com), + +Yanxing Liu $^{2}$ (liuyanxing21@mails.ucas.ac.cn), + +Xiao He $^{3}$ (xiaohewhu@163.com), + +Jiahao Li1 (lijiahao23@mails.tsinghua.edu.cn), + +Yuze Sun $^{1}$ (syz23@mails.tsinghua.edu.cn), + +Xiaomeng Huang $^{1}$ (hxm@tsinghua.edu.cn) + +# Affiliations: + +$^{1}$ Tsinghua University +$^{2}$ University of Chinese Academy of Sciences +$^{3}$ Wuhan University + +# IDCFS + +Title: Pseudo-Label Driven Vision-Language Grounding for Cross-Domain Few-Shot Object Detection + +# Members: + +Zhenyu Zhang $^{1}$ (m202273680@hust.edu.cn), + +Ran Ma1 (ranma@hust.edu.cn), + +Yuhan Liu1 (yuhan.liu@hust.edu.cn), + +Zijian Zhuang $^{1}$ (zhuangzj@hust.edu.cn), + +Shuai Yi $^{1}$ (yishuai@hust.edu.cn), + +Yixiong 
Zou1 (yixiongz@hust.edu.cn) + +# Affiliations: + +1 School of Computer Science and Technology, Huazhong University of Science and Technology + +# FDUROILab_Lenovo + +Title: Efficient Tuning and MLLM-Based Post Processing for CDFSOD + +# Members: + +Lingyi Hong1 (lyhong22@m.fudan.edu.cn), + +Mingxi Cheng1 (mxchen24@m.fudan.edu.cn), + +Runze Li $^{2}$ (lirz7@lenovo.com), + +Xingdong Sheng $^{2}$ (shengxd1@lenovo.com), + +Wenqiang Zhang $^{1,3}$ (wqzhang@fudan.edu.cn) + +# Affiliations: + +$^{1}$ Shanghai Key Lab of Intelligent Information Processing, School of Computer Science, Fudan University +2 Lenovo Research +3 Engineering Research Center of AI & Robotics, Ministry of Education, Academy for Engineering & Technology, Fudan University + +# HUSTLab + +Title: Prompt and Finetune Grounding DINO for Cross-Domain Few-shot Object Detection + +# Members: + +Weisen Chen $^{1}$ (U202115027@hust.edu.cn), + +Yongxin Yan $^{1}$ (2585856499@qq.com), + +Xinguo Chen $^{2}$ (327715@whut.edu.cn), + +Yuanjie Shao $^{1}$ (shaoyuanjie@hust.edu.cn), + +Zhengrong Zuo $^{1}$ (zhrzuo@main.hust.edu.cn), + +Nong Sang $^{1}$ (nsang@hust.edu.cn) + +# Affiliations: + +1 School of Artificial Intelligence and Automation, Huazhong University of Science and Technology +$^{2}$ School of Information Engineering, Wuhan University of Technology + +# TongjiLab + +Title: ProtoDINO: Cross-Domain Few-Shot Object Detection via GroundingDINO and CLIP-Based Prototypes + +# Members: + +Hao Wu$^{1}$ (haowu@tongji.edu.cn), + +Haoran Sun + +Affiliations: + +$^{1}$ Tongji University + +# Manifold + +Title: CDFSOD Challenge: Using Grounding-DINO Proposals and ResNet Embeddings + +Members: + +Shuming Hu1 (hsm123@nudt.edu.cn), + +Yan Zhang1, + +Zhiguang Shi1, + +Yu Zhang1, + +Chao Chen1, + +Tao Wang + +Affiliations: + +$^{1}$ National University of Defense Technology + +# MXT + +Title: Domain Adaptation Enhancement Module (DAEM) for Cross-Domain Few-Shot Object Detection + +Members: + +Da Feng 
$^{1}$ (072108208@fzu.edu.cn), + +Linhai Zhuo $^{1}$ (534537916@qq.com), + +Ziming Lin + +Affiliations: + +$^{1}$ Fuzhou University + +# X-Few + +Title: IFC: Instance Feature Caching for Cross-Domain Few-Shot Object Detection + +Members: + +Yali Huang $^{1}$ (hyl2024@gs.zzu.edu.cn), + +Jie Mei $^{1}$ (mj123123@gs.zzu.edu.cn), + +Yiming Yang1 (yangyim637@gmail.com), + +Mi Guo $^{1}$ (mimi987836724@gs.zzu.edu.cn), + +Mingyuan Jiu $^{1,2,3}$ (iemyjiu@zzu.edu.cn), + +Mingliang Xu $^{1,2,3}$ (iexumingliang@zzu.edu.cn) + +Affiliations: + +$^{1}$ School of Computer and Artificial Intelligence, Zhengzhou University +$^{2}$ Engineering Research Center of Intelligent Swarm Systems, Ministry of Education, Zhengzhou University +$^{3}$ National SuperComputing Center in Zhengzhou + +# MM + +Title: DFE-ViT: Dual Feature Enhancement Network for Cross-Domain Few-Shot Object Detection. + +# Members: + +Maomao Xiong $^{1}$ (202314866@mail.sdu.edu.cn), + +Qunshu Zhang $^{1}$ (202414859@mail.sdu.edu.cn), + +Xinyu Cao $^{1}$ (202414842@mail.sdu.edu.cn) + +Affiliations: + +1 Shandong University + +# FSV + +Title: Enhanced Prototype-based Cross-domain Few-shot Object Detection + +Members: + +Yuqing Yang1 (yyqyang101@gmail.com) + +Affiliations: + +1 George Mason University + +# IPC + +Title: Test-time Adaptation Strategy for Cross-Domain Few-Shot Object Detection + +Members: + +Dianmo Sheng $^{1}$ (dmsheng@mail.ustc.edu.cn), + +Xuanpu Zhao1, + +Zhiyu Li1, + +Xuyang Ding + +Affiliations: + +1 University of Science and Technology of China + +# LJY + +Title: Similarity-Calibrated Prototype Refinement for Cross-Domain Few-Shot Object Detection + +Members: + +Wenqian Li (wenqianli.li@seu.edu.cn) + +Affiliations: + +Southeast University + +# References + +[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. 
arXiv preprint arXiv:2502.13923, 2025.8, 9 +[2] Weilin Cai, Juyong Jiang, Fan Wang, Jing Tang, Sunghun Kim, and Jiayi Huang. A survey on mixture of experts. arXiv preprint arXiv:2407.06204, 2024. 5 +[3] Zheng Chen, Kai Liu, Jue Gong, Jingkai Wang, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. Ntire 2025 challenge on image super-resolution $(\times 4)$ : Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[4] Zheng Chen, Jingkai Wang, Kai Liu, Jue Gong, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. Ntire 2025 challenge on real-world face restoration: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[5] Marcos Conde, Radu Timofte, et al. Ntire 2025 challenge on raw image restoration and super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[6] Marcos Conde, Radu Timofte, et al. Raw image reconstruction from RGB on smartphones. ntire 2025 challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[7] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies, volume 1 (long and short papers), pages 4171–4186, 2019. 7 +[8] Geir Drange. Arthropod taxonomy orders object detection dataset. In https://doi.org/10.34740/kaggle/dsv/1240192, 2019.2 +[9] Egor Ershov, Sergey Korchagin, Alexei Khalin, Artyom Panshin, Arseniy Terekhin, Ekaterina Zaychenkova, Georgiy Lobarev, Vsevolod Plokhotnyuk, Denis Abramov, Elisey Zhdanov, Sofia Dorogova, Yasin Mamedov, Nikola Banic, Georgii Perevozhikov, Radu Timofte, et al. 
Ntire 2025 challenge on night photography rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[10] William Fedus, Barret Zoph, and Noam Shazeer. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. Journal of Machine Learning Research, 23(120):1-39, 2022. 5 +[11] Shenghao Fu, Qize Yang, Qijie Mo, Junkai Yan, Xihan Wei, Jingke Meng, Xiaohua Xie, and Wei-Shi Zheng. Llmdet: Learning strong open-vocabulary object detectors under the supervision of large language models. arXiv preprint arXiv:2501.18954, 2025. 8, 9 +[12] Yuqian Fu, Yanwei Fu, and Yu-Gang Jiang. Meta-fdmixup: Cross-domain few-shot learning guided by labeled target + +data. In Proceedings of the 29th ACM international conference on multimedia, pages 5326-5334, 2021. 1 +[13] Yuqian Fu, Yu Xie, Yanwei Fu, Jingjing Chen, and Yu-Gang Jiang. Me-d2n: Multi-expert domain decompositional network for cross-domain few-shot learning. In Proceedings of the 30th ACM international conference on multimedia, pages 6609-6617, 2022. +[14] Yuqian Fu, Yu Xie, Yanwei Fu, and Yu-Gang Jiang. Styleadv: Meta style adversarial training for cross-domain few-shot learning. In CVPR, 2023. 1 +[15] Yuqian Fu, Yu Wang, Yixuan Pan, Lian Huai, Xingyu Qiu, Zeyu Shangguan, Tong Liu, Yanwei Fu, Luc Van Gool, and Xingqun Jiang. Cross-domain few-shot object detection via enhanced open-set object detector. In European Conference on Computer Vision, pages 247-264. Springer, 2024. 1, 2, 4, 6 +[16] Yuqian Fu, Xingyu Qiu, Bin Ren Yanwei Fu, Radu Timofte, Nicu Sebe, Ming-Hsuan Yang, Luc Van Gool, et al. Ntire 2025 challenge on cross-domain few-shot object detection: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[17] Golnaz Ghiasi, Yin Cui, Aravind Srinivas, Rui Qian, Tsung-Yi Lin, Ekin D Cubuk, Quoc V Le, and Barret Zoph. 
Simple copy-paste is a strong data augmentation method for instance segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2918-2928, 2021. 9 +[18] Yunhui Guo, Noel C Codella, Leonid Karlinsky, James V Codella, John R Smith, Kate Saenko, Tajana Rosing, and Rogerio Feris. A broader study of cross-domain few-shot learning. In Computer vision-ECCV 2020: 16th European conference, Glasgow, UK, August 23-28, 2020, proceedings, part XXVII 16, pages 124-141. Springer, 2020. 1 +[19] Shuhao Han, Haotian Fan, Fangyuan Kong, Wenjie Liao, Chunle Guo, Chongyi Li, Radu Timofte, et al. Ntire 2025 challenge on text to image generation model quality assessment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[20] Meng-Ru Hsieh, Yen-Liang Lin, and Winston H Hsu. Drone-based object counting by spatially regularized regional proposal network. In Proceedings of the IEEE international conference on computer vision, pages 4145-4153, 2017. 1, 2, 8, 9, 13 +[21] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. *ICLR*, 1(2):3, 2022. 7 +[22] Gabriel Ilharco, Mitchell Wortsman, Ross Wightman, Cade Gordon, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, Hongseok Namkoong, John Miller, Hannaneh Hajishirzi, Ali Farhadi, and Ludwig Schmidt. Openclip, 2021. 10 +[23] Naoto Inoue, Ryosuke Furuta, Toshihiko Yamasaki, and Kiyoharu Aizawa. Cross-domain weakly-supervised object detection through progressive domain adaptation. In CVPR, 2018. 2 + +[24] Varun Jain, Zongwei Wu, Quan Zou, Louis Florentin, Henrik Turbell, Sandeep Siddhartha, Radu Timofte, et al. Ntire 2025 challenge on video quality enhancement for video conferencing: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2 +[25] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024. 5 +[26] Lihao Jiang, Yi Wang, Qi Jia, Shengwei Xu, Yu Liu, Xin Fan, Haojie Li, Risheng Liu, Xinwei Xue, and Ruili Wang. Underwater species detection using channel sharpening attention. In ACM MM, 2021. 2 +[27] Aishwarya Kamath, Mannat Singh, Yann LeCun, Gabriel Synnaeve, Ishan Misra, and Nicolas Carion. Mdetr-modulated detection for end-to-end multi-modal understanding. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1780-1790, 2021. 7, 8 +[28] Mona Köhler, Markus Eisenbach, and Horst-Michael Gross. Few-shot object detection: A comprehensive survey. IEEE Transactions on Neural Networks and Learning Systems, 2023. 1 +[29] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123:32-73, 2017. 7 +[30] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In Advances in Neural Information Processing Systems, pages 1097-1105, 2012. +[31] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, et al. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. International journal of computer vision, 128(7):1956-1981, 2020. 7, 8 +[32] Sangmin Lee, Eunpil Park, Angel Canelo, Hyunhee Park, Youngjo Kim, Hyungju Chun, Xin Jin, Chongyi Li, Chun-Le Guo, Radu Timofte, et al. 
Ntire 2025 challenge on efficient burst hdr and restoration: Datasets, methods, and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[33] Chunyuan Li, Haotian Liu, Liunian Li, Pengchuan Zhang, Jyoti Aneja, Jianwei Yang, Ping Jin, Houdong Hu, Zicheng Liu, Yong Jae Lee, et al. Elevater: A benchmark and toolkit for evaluating language-augmented visual models. Advances in Neural Information Processing Systems, 35:9287-9301, 2022. 8 +[34] Ke Li, Gang Wan, Gong Cheng, Liqui Meng, and Junwei Han. Object detection in optical remote sensing images: A survey and a new benchmark. ISPRS, 2020. 2 +[35] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu + +Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language-image pre-training. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10965-10975, 2022. 4, 7 +[36] Wei-Hong Li, Xialei Liu, and Hakan Bilen. Cross-domain few-shot learning with task-specific adapters. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7161-7170, 2022. 1 +[37] Xin Li, Yeying Jin, Xin Jin, Zongwei Wu, Bingchen Li, Yufei Wang, Wenhan Yang, Yu Li, Zhibo Chen, Bihan Wen, Robby Tan, Radu Timofte, et al. Ntire 2025 challenge on day and night raindrop removal for dual-focused images: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[38] Xin Li, Xijun Wang, Bingchen Li, Kun Yuan, Yizhen Shao, Suhang Yao, Ming Sun, Chao Zhou, Radu Timofte, and Zhibo Chen. Ntire 2025 challenge on short-formUGC video quality assessment and enhancement: Kwaisr dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2 +[39] Xin Li, Kun Yuan, Bingchen Li, Fengbin Guan, Yizhen Shao, Zihao Yu, Xijun Wang, Yiting Lu, Wei Luo, Suhang Yao, Ming Sun, Chao Zhou, Zhibo Chen, Radu Timofte, et al. Ntire 2025 challenge on short-formUGC video quality assessment and enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[40] Jie Liang, Radu Timofte, Qiaosi Yi, Zhengqiang Zhang, Shuaizheng Liu, Lingchen Sun, Rongyuan Wu, Xindong Zhang, Hui Zeng, Lei Zhang, et al. Ntire 2025 the 2nd restore any image model (RAIM) in the wild challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[41] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer vision-ECCV 2014: 13th European conference, zurich, Switzerland, September 6-12, 2014, proceedings, part v 13, pages 740-755. Springer, 2014. 1, 2, 8 +[42] Aixin Liu, Bei Feng, Bin Wang, Bingxuan Wang, Bo Liu, Chenggang Zhao, Chengqi Dengr, Chong Ruan, Damai Dai, Daya Guo, et al. Deepseek-v2: A strong, economical, and efficient mixture-of-experts language model. arXiv preprint arXiv:2405.04434, 2024. 5 +[43] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Qing Jiang, Chunyuan Li, Jianwei Yang, Hang Su, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. In European Conference on Computer Vision, pages 38-55. Springer, 2024. 4, 6, 7, 9, 10 +[44] Xiaohong Liu, Xiongkuo Min, Qiang Hu, Xiaoyun Zhang, Jie Guo, et al. Ntire 2025 XGC quality assessment challenge: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2 + +[45] Xiaoning Liu, Zongwei Wu, Florin-Alexandru Vasluianu, Hailong Yan, Bin Ren, Yulun Zhang, Shuhang Gu, Le Zhang, Ce Zhu, Radu Timofte, et al. Ntire 2025 challenge on low light image enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[46] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 7, 9 +[47] Alexander Neubeck and Luc Van Gool. Efficient nonmaximum suppression. In 18th international conference on pattern recognition (ICPR'06), pages 850-855. IEEE, 2006. 7 +[48] Maxime Oquab, Timothee Darct, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 14 +[49] Vicente Ordonez, Girish Kulkarni, and Tamara Berg. Im2text: Describing images using 1 million captioned photographs. Advances in neural information processing systems, 24, 2011. 7 +[50] Hongpeng Pan, Shifeng Yi, Shouwei Yang, Lei Qi, Bing Hu, Yi Xu, and Yang Yang. The solution for cvpr2024 foundational few-shot object detection challenge. arXiv preprint arXiv:2406.12225, 2024. 9 +[51] Jiancheng Pan, Yanxing Liu, Yuqian Fu, Muyuan Ma, Jiaohao Li, Danda Pani Paudel, Luc Van Gool, and Xiaomeng Huang. Locate anything on earth: Advancing open-vocabulary object detection for remote sensing community, 2024. 6 +[52] Jiancheng Pan, Muyuan Ma, Qing Ma, Cong Bai, and Shengyong Chen. Pir: Remote sensing image-text retrieval with prior instruction representation learning, 2024. 6 +[53] Limeng Qiao, Yuxuan Zhao, Zhiyuan Li, Xi Qiu, Jianan Wu, and Chi Zhang. Defrcn: Decoupled faster r-cnn for few-shot object detection. In ICCV, 2021. 
1 +[54] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 10 +[55] Bin Ren, Yahui Liu, Yue Song, Wei Bi, Rita Cucchiara, Nicu Sebe, and Wei Wang. Masked jigsaw puzzle: A versatile position embedding for vision transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20382-20391, 2023. 1 +[56] Bin Ren, Yawei Li, Jingyun Liang, Rakesh Ranjan, Mengyuan Liu, Rita Cucchiara, Luc V Gool, Ming-Hsuan Yang, and Nicu Sebe. Sharing key semantics in transformer makes efficient image restoration. Advances in Neural Information Processing Systems, 37:7427-7463, 2024. 1 +[57] Bin Ren, Hang Guo, Lei Sun, Zongwei Wu, Radu Timofte, Yawei Li, et al. The tenth nitre 2025 efficient superresolution challenge report. In Proceedings of the IEEE/CVF + +Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[58] Tianhe Ren, Qing Jiang, Shilong Liu, Zhaoyang Zeng, Wenlong Liu, Han Gao, Hongjie Huang, Zhengyu Ma, Xiaoke Jiang, Yihao Chen, et al. Grounding dino 1.5: Advance the" edge" of open-set object detection. arXiv preprint arXiv:2405.10300, 2024. 4 +[59] Xiaoqian Ruan and Wei Tang. Fully test-time adaptation for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1038-1047, 2024. 15 +[60] Inkyu Sa, Zongyuan Ge, Feras Dayoub, Ben Upcroft, Tristan Perez, and Chris McCool. Deepfruits: A fruit detection system using deep neural networks. sensors, 16(8):1222, 2016. 1, 2, 8, 9, 13 +[61] Nickolay Safonov, Alexey Bryntsev, Andrey Moskalenko, Dmitry Kulikov, Dmitriy Vatolin, Radu Timofte, et al. Ntire 2025 challenge on UGC video enhancement: Methods and results. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[62] Alzayat Saleh, Issam H Laradji, Dmitry A Konovalov, Michael Bradley, David Vazquez, and Marcus Sheaves. A realistic fish-habitat dataset to evaluate algorithms for underwater visual analysis. Scientific Reports, 2020. 2 +[63] Zeyu Shangguan and Mohammad Rostami. Identification of novel classes for improving few-shot object detection. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3356-3366, 2023. 1 +[64] Zeyu Shangguan and Mohammad Rostami. Improved region proposal network for enhanced few-shot object detection. Neural Networks, 180:106699, 2024. 1 +[65] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 8430-8439, 2019. 8 +[66] Jake Snell, Kevin Swersky, and Richard Zemel. Prototypical networks for few-shot learning. Advances in neural information processing systems, 30, 2017. 10 +[67] Kechen Song and Yunhui Yan. A noise robust method based on completed local binary patterns for hot-rolled steel strip surface defects. Applied Surface Science, 2013. 2 +[68] Bo Sun, Banghuai Li, Shengcai Cai, Ye Yuan, and Chi Zhang. Fsce: Few-shot object detection via contrastive proposal encoding. In CVPR, 2021. 1 +[69] Lei Sun, Andrea Alfarano, Peiqi Duan, Shaolin Su, Kaiwei Wang, Boxin Shi, Radu Timofte, Danda Pani Paudel, Luc Van Gool, et al. Ntire 2025 challenge on event-based image deblurring: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[70] Lei Sun, Hang Guo, Bin Ren, Luc Van Gool, Radu Timofte, Yawei Li, et al. The tenth nitre 2025 image denoising challenge report. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 + +[71] Hao Tang, Chengcheng Yuan, Zechao Li, and Jinhui Tang. Learning attention-guided pyramidal features for few-shot fine-grained recognition. Pattern Recognition, 130:108792, 2022. 1 +[72] Hung-Yu Tseng, Hsin-Ying Lee, Jia-Bin Huang, and Ming-Hsuan Yang. Cross-domain few-shot classification via learned feature-wise transformation. arXiv preprint arXiv:2001.08735, 2020. 1 +[73] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Cailian Chen, Zongwei Wu, Radu Timofte, et al. Ntire 2025 image shadow removal challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[74] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Zongwei Wu, Radu Timofte, et al. Ntire 2025 ambient lighting normalization challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[75] Xin Wang, Thomas E Huang, Trevor Darrell, Joseph E Gonzalez, and Fisher Yu. Frustratingly simple few-shot object detection. arXiv preprint arXiv:2003.06957, 2020. 1 +[76] Xinkuang Wang, Wenjing Li, and Zhongcheng Wu. Cardd: A new dataset for vision-based car damage detection. IEEE Transactions on Intelligent Transportation Systems, 24(7): 7202-7214, 2023. 1, 2, 9, 13 +[77] Yingqian Wang, Zhengyu Liang, Fengyuan Zhang, Lvli Tian, Longguang Wang, Juncheng Li, Jungang Yang, Radu Timofte, Yulan Guo, et al. Ntire 2025 challenge on light field image super-resolution: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[78] Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chau-mond, Clement Delangue, Anthony Moi, Pierrick Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, et al. Huggingface's transformers: State-of-the-art natural language processing. arXiv preprint arXiv:1910.03771, 2019. 
8 +[79] Dongxian Wu, Shu-Tao Xia, and Yisen Wang. Adversarial weight perturbation helps robust generalization. Advances in neural information processing systems, 33:2958-2969, 2020. 9 +[80] Fuzhao Xue, Zian Zheng, Yao Fu, Jinjie Ni, Zangwei Zheng, Wangchunshu Zhou, and Yang You. Openmoe: An early effort on open mixture-of-experts language models. arXiv preprint arXiv:2402.01739, 2024. 5 +[81] Kangning Yang, Jie Cai, Ling Ouyang, Florin-Alexandru Vasluianu, Radu Timofte, Jiaming Ding, Huiming Sun, Lan Fu, Jinlong Li, Chiu Man Ho, Zibo Meng, et al. Ntire 2025 challenge on single image reflection removal in the wild: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[82] Pierluigi Zama Ramirez, Fabio Tosi, Luigi Di Stefano, Radu Timofte, Alex Costanzino, Matteo Poggi, Samuele Salti, Stefano Mattoccia, et al. Ntire 2025 challenge on hr depth from images of specular and transparent surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 + +[83] Zican Zha, Hao Tang, Yunlian Sun, and Jinhui Tang. Boosting few-shot fine-grained recognition with background suppression and foreground alignment. IEEE Transactions on Circuits and Systems for Video Technology, 33(8):3947-3961, 2023. 1 +[84] Ji Zhang, Jingkuan Song, Lianli Gao, and Hengtao Shen. Free-lunch for cross-domain few-shot learning: Style-aware episodic training with robust contrastive learning. In Proceedings of the 30th ACM international conference on multimedia, pages 2586-2594, 2022. 1 +[85] Xinyu Zhang, Yuhan Liu, Yuting Wang, and Abdeslam Boularias. Detect everything with few examples. arXiv preprint arXiv:2309.12969, 2023. 1, 3 +[86] Linhai Zhuo, Yuqian Fu, Jingjing Chen, Yixin Cao, and YuGang Jiang. Tgdm: Target guided dynamic mixup for cross-domain few-shot learning. 
In Proceedings of the 30th ACM International Conference on Multimedia, pages 6368-6376, 2022. 1 +[87] Linhai Zhuo, Yuqian Fu, Jingjing Chen, Yixin Cao, and YuGang Jiang. Unified view empirical study for large pretrained model on cross-domain few-shot learning. ACM Transactions on Multimedia Computing, Communications and Applications, 20(9):1-18, 2024. 1 \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10685/images/01810eb83ce91b149e958f580b0174e2285a8dc0d030ed2e1c8e83e917133bfe.jpg b/data/2025/2504_10xxx/2504.10685/images/01810eb83ce91b149e958f580b0174e2285a8dc0d030ed2e1c8e83e917133bfe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f2c9b0446c90aa538680ca9b084a49647e8f000 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/01810eb83ce91b149e958f580b0174e2285a8dc0d030ed2e1c8e83e917133bfe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e426d5b17213777e4bf2346264dc9e0ffe84dcc28b8b14c74ee8effcfdb5ce0 +size 29522 diff --git a/data/2025/2504_10xxx/2504.10685/images/02bfe67dc0273f0b6bb1570a13e9e272e3fc2bb1f70285eea045d85d22cd74f4.jpg b/data/2025/2504_10xxx/2504.10685/images/02bfe67dc0273f0b6bb1570a13e9e272e3fc2bb1f70285eea045d85d22cd74f4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f02ad19055fc43b96ffdf3dc84db97ecacb7987a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/02bfe67dc0273f0b6bb1570a13e9e272e3fc2bb1f70285eea045d85d22cd74f4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc6f14e2a36a6732c7f4c38530858adad803c67a378b5e46d03468dde1109072 +size 18483 diff --git a/data/2025/2504_10xxx/2504.10685/images/0693ff91746e274d37bc3a660190108db992ba7f396a2a55c15d92d173af7306.jpg b/data/2025/2504_10xxx/2504.10685/images/0693ff91746e274d37bc3a660190108db992ba7f396a2a55c15d92d173af7306.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3a6150c9ea302cb0394fe92b06fbed29be7d069 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10685/images/0693ff91746e274d37bc3a660190108db992ba7f396a2a55c15d92d173af7306.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ebca5c34dd99313e8f59b6a78bf78ca5bc717e88e8f9d9cd346a9bf56eb624f +size 4290 diff --git a/data/2025/2504_10xxx/2504.10685/images/0b7da84e2775e9ddbfcbf86d3acb6b3e314368a8cf2dd3dcc8102d0f04f792a6.jpg b/data/2025/2504_10xxx/2504.10685/images/0b7da84e2775e9ddbfcbf86d3acb6b3e314368a8cf2dd3dcc8102d0f04f792a6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5495fb13e48d3db293c5f96b2bfd1dd9c82a38b4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/0b7da84e2775e9ddbfcbf86d3acb6b3e314368a8cf2dd3dcc8102d0f04f792a6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6ca308f8268e1eeaa07d36a2a0c3dd8ec38f9e057fafb21dc63fafb56d0bc55 +size 34191 diff --git a/data/2025/2504_10xxx/2504.10685/images/0ccac6ed1ed6b29bebe32679af7dcabc465e116a0d16d870635d362ad7bd1b03.jpg b/data/2025/2504_10xxx/2504.10685/images/0ccac6ed1ed6b29bebe32679af7dcabc465e116a0d16d870635d362ad7bd1b03.jpg new file mode 100644 index 0000000000000000000000000000000000000000..229dd3e4180b6e0e9fb3f6e8c25b748e0d0d7051 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/0ccac6ed1ed6b29bebe32679af7dcabc465e116a0d16d870635d362ad7bd1b03.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f71465e283445f9abc195206a5061377d2c6ceabeab64ce55dab5e89f102b86 +size 32786 diff --git a/data/2025/2504_10xxx/2504.10685/images/13a996835fc99a32166076ccff99c3a26e87309aafa00d6ad1749b0033fd6aab.jpg b/data/2025/2504_10xxx/2504.10685/images/13a996835fc99a32166076ccff99c3a26e87309aafa00d6ad1749b0033fd6aab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe28803fb220e069bd28aa46014d2f86688a8305 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/13a996835fc99a32166076ccff99c3a26e87309aafa00d6ad1749b0033fd6aab.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:117189c0c92256ec48e0670fb97c1b3a7b8250e9d2a04bd898167a10bbb7b94f +size 4938 diff --git a/data/2025/2504_10xxx/2504.10685/images/153d0b654b7da804fdc9de38e58dac2c9c03e4be0664d437046d606714320426.jpg b/data/2025/2504_10xxx/2504.10685/images/153d0b654b7da804fdc9de38e58dac2c9c03e4be0664d437046d606714320426.jpg new file mode 100644 index 0000000000000000000000000000000000000000..057261c4452da40f005c9dfc7effa64d8d547813 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/153d0b654b7da804fdc9de38e58dac2c9c03e4be0664d437046d606714320426.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97bbfc632fd4e278df2404e583a953ee16b3b6dff1342538d38355df6fcd5ef7 +size 4534 diff --git a/data/2025/2504_10xxx/2504.10685/images/1ab852e9315483efc47560a3dcb6a31d41a3b83a16d826a076224a6a2523400e.jpg b/data/2025/2504_10xxx/2504.10685/images/1ab852e9315483efc47560a3dcb6a31d41a3b83a16d826a076224a6a2523400e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e518d77a5fbe1d1f3d2b03ffb79e6131834070b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/1ab852e9315483efc47560a3dcb6a31d41a3b83a16d826a076224a6a2523400e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ddadfd69d7e821b1051b1576716205ff0a87a1fb0efa7fcb5a38c0b4ba44008 +size 4607 diff --git a/data/2025/2504_10xxx/2504.10685/images/1bc0d3ab7ab85fb2208d5f61d937c337f5fbc4fba1fc2687c691f111074cfeb2.jpg b/data/2025/2504_10xxx/2504.10685/images/1bc0d3ab7ab85fb2208d5f61d937c337f5fbc4fba1fc2687c691f111074cfeb2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..17a75bcc8f67745e8536a469777ca321653a01e5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/1bc0d3ab7ab85fb2208d5f61d937c337f5fbc4fba1fc2687c691f111074cfeb2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2e4be985d30ca33e8f02aa6b907451b9d506de7356915ccc833cc590d32aace +size 104089 diff --git 
a/data/2025/2504_10xxx/2504.10685/images/1e01daa6fc606ca5ce479b785dcee0296e8a6449dc10479dec8c41a759e28045.jpg b/data/2025/2504_10xxx/2504.10685/images/1e01daa6fc606ca5ce479b785dcee0296e8a6449dc10479dec8c41a759e28045.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1633ef03527670f4182278606b66c6f2be485116 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/1e01daa6fc606ca5ce479b785dcee0296e8a6449dc10479dec8c41a759e28045.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:445220dd44777d9cfc2f605d9686a5b1db1442a31a3dafb4843234a73b4ebe2a +size 5034 diff --git a/data/2025/2504_10xxx/2504.10685/images/2b50d3312342b5cd025a42c775638c146fa67b6a97da6349fafce67842eb6f89.jpg b/data/2025/2504_10xxx/2504.10685/images/2b50d3312342b5cd025a42c775638c146fa67b6a97da6349fafce67842eb6f89.jpg new file mode 100644 index 0000000000000000000000000000000000000000..682fd3a31c0345e428d1cedd9a5bcbbacd902545 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/2b50d3312342b5cd025a42c775638c146fa67b6a97da6349fafce67842eb6f89.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5bfcd885e850a082af3b8647ac84fcb0958d4868ef26188759748516c28bf42 +size 10112 diff --git a/data/2025/2504_10xxx/2504.10685/images/2e776d348f36269cd21f07a116e3c3ab05ba8b3b4376ab45fc04cc9c75b69a62.jpg b/data/2025/2504_10xxx/2504.10685/images/2e776d348f36269cd21f07a116e3c3ab05ba8b3b4376ab45fc04cc9c75b69a62.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f142816d14860b607cff6693b85071206dc3341 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/2e776d348f36269cd21f07a116e3c3ab05ba8b3b4376ab45fc04cc9c75b69a62.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4e038ea53ec031e31f5e90edc058b62e2ef8de34a47d307dfec935ae8b185ff +size 3834 diff --git a/data/2025/2504_10xxx/2504.10685/images/34b272fd1df84c6f17f1492d20fb8dd6350fd3e78c40deb4876d1c5137bf0fdc.jpg 
b/data/2025/2504_10xxx/2504.10685/images/34b272fd1df84c6f17f1492d20fb8dd6350fd3e78c40deb4876d1c5137bf0fdc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53974e2bcc0c8de5fba9f2970ffa34aea2da8d66 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/34b272fd1df84c6f17f1492d20fb8dd6350fd3e78c40deb4876d1c5137bf0fdc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23387fa7b8f7769a3417b7f7efc65606800381d8386c01b3480be87f7b4a0652 +size 4878 diff --git a/data/2025/2504_10xxx/2504.10685/images/34d5c7327c4b757ab819bc31c0eaaba07ef74cf4419324e6da82d14ccdbd7d0e.jpg b/data/2025/2504_10xxx/2504.10685/images/34d5c7327c4b757ab819bc31c0eaaba07ef74cf4419324e6da82d14ccdbd7d0e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c1665ea08fe2983e032ccf7d6926b96754192955 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/34d5c7327c4b757ab819bc31c0eaaba07ef74cf4419324e6da82d14ccdbd7d0e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b83b5668ca9842da9d9841b35f8286beecee75ce3852ba3a90a9d336f0e1048 +size 4473 diff --git a/data/2025/2504_10xxx/2504.10685/images/3a8a5a344c8d7a48b7252170d1c6170ca1b9ad7c65a3ecb9a38a78a25b908a37.jpg b/data/2025/2504_10xxx/2504.10685/images/3a8a5a344c8d7a48b7252170d1c6170ca1b9ad7c65a3ecb9a38a78a25b908a37.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8f7fdadd71052810f28789801db4bdd19273717 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/3a8a5a344c8d7a48b7252170d1c6170ca1b9ad7c65a3ecb9a38a78a25b908a37.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e55b2ec6cfd2b550fcdeaec2fb0d2dd6f078f1fa362cd897e73bdfa23b53193a +size 31191 diff --git a/data/2025/2504_10xxx/2504.10685/images/3fc263e3ffbb96207506aa0a9167656cd36623edb2810e5480bf08198e1c4a2a.jpg b/data/2025/2504_10xxx/2504.10685/images/3fc263e3ffbb96207506aa0a9167656cd36623edb2810e5480bf08198e1c4a2a.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..8283ae209d78f181a03fb93f8981fd10831968c2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/3fc263e3ffbb96207506aa0a9167656cd36623edb2810e5480bf08198e1c4a2a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f692c0fbfdeabb770a860f6f6f72565fa00bcdae1c5d6a01ca45d53a8055c14 +size 34108 diff --git a/data/2025/2504_10xxx/2504.10685/images/43d8c11f2a43152c39a45bf877db09a41ebc256d1d670b340e4ee5df4386d35a.jpg b/data/2025/2504_10xxx/2504.10685/images/43d8c11f2a43152c39a45bf877db09a41ebc256d1d670b340e4ee5df4386d35a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..99f2553f7c430d7a7d0452a91ddfb796e07eb83d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/43d8c11f2a43152c39a45bf877db09a41ebc256d1d670b340e4ee5df4386d35a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:248eb03873d21fa16ab54751220583dd814e51a7eb7e775fe833ec286c29558b +size 39315 diff --git a/data/2025/2504_10xxx/2504.10685/images/4f59d8b52cddbabd2debd7634615c8b6f658ffbcf42f7cee12d224037c00c21e.jpg b/data/2025/2504_10xxx/2504.10685/images/4f59d8b52cddbabd2debd7634615c8b6f658ffbcf42f7cee12d224037c00c21e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a89032fe302be68305a2711c437b2c23acc1528e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/4f59d8b52cddbabd2debd7634615c8b6f658ffbcf42f7cee12d224037c00c21e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fac6f085c2f4767f1b44477bfeace4a7dc8aa9b858987cefef5d0fd3589968b +size 16578 diff --git a/data/2025/2504_10xxx/2504.10685/images/542ce157e969fcbf8441ad06bf12f80ab75fa1dfc883ef3975a5aa3501914467.jpg b/data/2025/2504_10xxx/2504.10685/images/542ce157e969fcbf8441ad06bf12f80ab75fa1dfc883ef3975a5aa3501914467.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d0fe6d5681f8f5fde1c08ae6d8b5a42200e4449c --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10685/images/542ce157e969fcbf8441ad06bf12f80ab75fa1dfc883ef3975a5aa3501914467.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13247df620dfd0556f4f85a90e9713d392f546b1da6bf87a398c0488366ef110 +size 4652 diff --git a/data/2025/2504_10xxx/2504.10685/images/6fc03298da45892b97c900a30b15ac1c7bd6dd9d8d85b21db84865a5f8f20679.jpg b/data/2025/2504_10xxx/2504.10685/images/6fc03298da45892b97c900a30b15ac1c7bd6dd9d8d85b21db84865a5f8f20679.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f22ade50a0a2790aa8fa4b8764ad435a0cd23dd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/6fc03298da45892b97c900a30b15ac1c7bd6dd9d8d85b21db84865a5f8f20679.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65fe55c97b806efb0338a076bd91f06655a1b032ca18865bada747841535314b +size 24718 diff --git a/data/2025/2504_10xxx/2504.10685/images/763a654b6700a499b38d08923d01cb73026de94e7f0b3cbbc9baa9b28877de63.jpg b/data/2025/2504_10xxx/2504.10685/images/763a654b6700a499b38d08923d01cb73026de94e7f0b3cbbc9baa9b28877de63.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f5167d1d90755b3c4a7f7d27ae2d988304002b08 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/763a654b6700a499b38d08923d01cb73026de94e7f0b3cbbc9baa9b28877de63.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:475211173da566ecf5fc2639cefbe9a6b3e0c804648e6586bee3dffa47911a19 +size 3770 diff --git a/data/2025/2504_10xxx/2504.10685/images/8256797b5f0a9495c1811d89895022caafb9339d5fda93b87d1650cdffd08b1d.jpg b/data/2025/2504_10xxx/2504.10685/images/8256797b5f0a9495c1811d89895022caafb9339d5fda93b87d1650cdffd08b1d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc1fceba69a6f5662911e86202c11fcfbe959016 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/8256797b5f0a9495c1811d89895022caafb9339d5fda93b87d1650cdffd08b1d.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:ffbbdba73aa6f6f06f8f6270ca0f481cda41ff597a928c3ded1d512618e033d9 +size 5441 diff --git a/data/2025/2504_10xxx/2504.10685/images/85b77445909842aa7ec248e69a7ffa63394b4c2f44fa7b5de322b7355f4aed5c.jpg b/data/2025/2504_10xxx/2504.10685/images/85b77445909842aa7ec248e69a7ffa63394b4c2f44fa7b5de322b7355f4aed5c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7daee1644ea6f5e87fdfc31c5515ffe5a1031c2a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/85b77445909842aa7ec248e69a7ffa63394b4c2f44fa7b5de322b7355f4aed5c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f29649fcb1baa58121f3fd1536c21b65edb82eee74e86ca85ca0661e298854a7 +size 29113 diff --git a/data/2025/2504_10xxx/2504.10685/images/8c8332ca03b01b9b660825506ac66cda3019c014dd3bbd75b026f0da59b8569c.jpg b/data/2025/2504_10xxx/2504.10685/images/8c8332ca03b01b9b660825506ac66cda3019c014dd3bbd75b026f0da59b8569c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..10fb94611f8cc2133f672c00827cf1417d0b9c58 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/8c8332ca03b01b9b660825506ac66cda3019c014dd3bbd75b026f0da59b8569c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92b2169df3d6f55af55abee54a9a74f6417e92b1b4a1ba07d10722ffac1a24c8 +size 32998 diff --git a/data/2025/2504_10xxx/2504.10685/images/8e0a5ce6cbe5f1bc98968e9128663375e2bf4d8e17311e0087fd301456ee1c0b.jpg b/data/2025/2504_10xxx/2504.10685/images/8e0a5ce6cbe5f1bc98968e9128663375e2bf4d8e17311e0087fd301456ee1c0b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..034a602b0086f88f3f4a4c4b28f607ab34aa6085 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/8e0a5ce6cbe5f1bc98968e9128663375e2bf4d8e17311e0087fd301456ee1c0b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94b8975aa6651fe49f80fdedc2637758e7921b1e42391004717e5355d611c7dc +size 23265 diff --git 
a/data/2025/2504_10xxx/2504.10685/images/8e3b6e9faa6f1d53069bf24196c367f48a6404b2c370382e50a39429b69ee961.jpg b/data/2025/2504_10xxx/2504.10685/images/8e3b6e9faa6f1d53069bf24196c367f48a6404b2c370382e50a39429b69ee961.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f76b5066beb22748414b2de985ec16beaaabc9d9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/8e3b6e9faa6f1d53069bf24196c367f48a6404b2c370382e50a39429b69ee961.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09c6ceec33b18fcc1810c0131e776894bab0e05df3c68b0001de8fd483163ca4 +size 34417 diff --git a/data/2025/2504_10xxx/2504.10685/images/9f7c58c6153c5492de9247cf262c01c36f6cfc8b799078979c8b237f362f2ad7.jpg b/data/2025/2504_10xxx/2504.10685/images/9f7c58c6153c5492de9247cf262c01c36f6cfc8b799078979c8b237f362f2ad7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f0a86ca27e9ebf3df28e1538386f9258773c247 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/9f7c58c6153c5492de9247cf262c01c36f6cfc8b799078979c8b237f362f2ad7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf177010df33c6af8412e26d35792623f1e5c05894eb71fcfd46a91e6b82cc57 +size 28171 diff --git a/data/2025/2504_10xxx/2504.10685/images/a08ef0f9dc4809732e94f6a54a4e9fb6edf8cb2fe27dba8bc5508c95462c3ad5.jpg b/data/2025/2504_10xxx/2504.10685/images/a08ef0f9dc4809732e94f6a54a4e9fb6edf8cb2fe27dba8bc5508c95462c3ad5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe0ab0ebcdb790d8233736736d5cbdd3a9baf1eb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/a08ef0f9dc4809732e94f6a54a4e9fb6edf8cb2fe27dba8bc5508c95462c3ad5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa5c77380c26c1504119111d90b18b1c6b46362ae103891fc4d15e09a047bb9d +size 40286 diff --git a/data/2025/2504_10xxx/2504.10685/images/a52655dc0e72448db1a5c90b37fef79880351951b1754df79cc9ae69a8a2f908.jpg 
b/data/2025/2504_10xxx/2504.10685/images/a52655dc0e72448db1a5c90b37fef79880351951b1754df79cc9ae69a8a2f908.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dcdbea1eaf928907c356c9814aa1402989d46e80 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/a52655dc0e72448db1a5c90b37fef79880351951b1754df79cc9ae69a8a2f908.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0977da0c28bdc5d520e8b820997c58cb28620ac550ed55905af9b6cc799d0cc +size 3325 diff --git a/data/2025/2504_10xxx/2504.10685/images/c281bd39de4691807c301ba17b6d49273f8d7a7767a20c78259bbb45b9f42084.jpg b/data/2025/2504_10xxx/2504.10685/images/c281bd39de4691807c301ba17b6d49273f8d7a7767a20c78259bbb45b9f42084.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4dbc19eaf7e961f4586f270cb980d057f09118ab --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/c281bd39de4691807c301ba17b6d49273f8d7a7767a20c78259bbb45b9f42084.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9871adbdeb9f415ee72105d14c8b8a68ae3d7fec75ae4172e57a5ade36b6429 +size 36887 diff --git a/data/2025/2504_10xxx/2504.10685/images/c814ade6f48089cca51eff010a2397c2da259ccedc9241f7eec304464c67d65b.jpg b/data/2025/2504_10xxx/2504.10685/images/c814ade6f48089cca51eff010a2397c2da259ccedc9241f7eec304464c67d65b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d53544d0dd6a94e3ced8aab84c605871f21b5b7c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/c814ade6f48089cca51eff010a2397c2da259ccedc9241f7eec304464c67d65b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a6469de3f06798c75404aa4bdd1273900ebe22a38aa4d3e84e260e875270d98 +size 4623 diff --git a/data/2025/2504_10xxx/2504.10685/images/cda92802f910d1d9c86fcb9690a9c013f2143055bf9564f0a4664b98cfef3300.jpg b/data/2025/2504_10xxx/2504.10685/images/cda92802f910d1d9c86fcb9690a9c013f2143055bf9564f0a4664b98cfef3300.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..423bd16731ea1260bfd5f2c66f555e7636cf57e8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/cda92802f910d1d9c86fcb9690a9c013f2143055bf9564f0a4664b98cfef3300.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15097012c324532ae407c9a3bad6e490180fc8ce3c723fe658aeaeb51d36d63c +size 19627 diff --git a/data/2025/2504_10xxx/2504.10685/images/db3a8fdfeee41c36fdb097170cdd5cbd99260e8264a29ec9a9b48b94f98c62f1.jpg b/data/2025/2504_10xxx/2504.10685/images/db3a8fdfeee41c36fdb097170cdd5cbd99260e8264a29ec9a9b48b94f98c62f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a86010f1ef710d185bf5071f1ed4be7623e6a279 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/db3a8fdfeee41c36fdb097170cdd5cbd99260e8264a29ec9a9b48b94f98c62f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:456c0ae3a11e4c9ff5b9a0bb275226c829c9c7e241939187336e1bcea2a0925b +size 47462 diff --git a/data/2025/2504_10xxx/2504.10685/images/e1bae98ebc90a5af43497e591c20abbf6d9c63d8ec25b909a6702559d0ae8005.jpg b/data/2025/2504_10xxx/2504.10685/images/e1bae98ebc90a5af43497e591c20abbf6d9c63d8ec25b909a6702559d0ae8005.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2b7e144e862a51afeabe1ae14d8ce8e094316f1a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/e1bae98ebc90a5af43497e591c20abbf6d9c63d8ec25b909a6702559d0ae8005.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c8db5396a612d2d797bfe706ee60386a47bf6917d6ffb51cc149da0f5279e81 +size 24241 diff --git a/data/2025/2504_10xxx/2504.10685/images/e2f28966d29b1d9309d4b9cb28111ee3b3de5a35e827252d153c528d74e5800c.jpg b/data/2025/2504_10xxx/2504.10685/images/e2f28966d29b1d9309d4b9cb28111ee3b3de5a35e827252d153c528d74e5800c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d4c960e1be0e4885468f4881ea8cd7ce769dda78 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10685/images/e2f28966d29b1d9309d4b9cb28111ee3b3de5a35e827252d153c528d74e5800c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01281d4243a204bef43843a66784f65125ca54b52691b73db761029afc74f893 +size 27871 diff --git a/data/2025/2504_10xxx/2504.10685/images/e6bd5a1a7ac2872e0e9ebd77446c8317fcdc5f074f12be87eed64ee98bdfa6ce.jpg b/data/2025/2504_10xxx/2504.10685/images/e6bd5a1a7ac2872e0e9ebd77446c8317fcdc5f074f12be87eed64ee98bdfa6ce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2ba63712bca50e8b7272409e4bc9cc4502a7bc81 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/e6bd5a1a7ac2872e0e9ebd77446c8317fcdc5f074f12be87eed64ee98bdfa6ce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e291f62443a04ce67063d31894c6796890b1e639922892cd8d76cb8deabf44a1 +size 143389 diff --git a/data/2025/2504_10xxx/2504.10685/images/e99f2e71e525eaa1944ac9b617b933a5aa42cf899e9ed92b704bb56a99e68e67.jpg b/data/2025/2504_10xxx/2504.10685/images/e99f2e71e525eaa1944ac9b617b933a5aa42cf899e9ed92b704bb56a99e68e67.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74fe48f195332bd16450729ff5c8261a9a9683c9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/e99f2e71e525eaa1944ac9b617b933a5aa42cf899e9ed92b704bb56a99e68e67.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38c7f37d93ae3697df3bd1cb7464811c5bac388f5126813c0494b78dfac3ccb1 +size 23436 diff --git a/data/2025/2504_10xxx/2504.10685/images/faf9e372c6d33c67159205a2853d4200013b769eca66abbbf066658facb6487a.jpg b/data/2025/2504_10xxx/2504.10685/images/faf9e372c6d33c67159205a2853d4200013b769eca66abbbf066658facb6487a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c094eaaadd2dce19f584e8c48852af9fd6305bfe --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/faf9e372c6d33c67159205a2853d4200013b769eca66abbbf066658facb6487a.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:bd20df8912e69fe50606886f718dcbb082f2f4300cf1912d3d5bde1431117134 +size 3863 diff --git a/data/2025/2504_10xxx/2504.10685/images/fe1b95b5306e4a2471bb335e8e5c2e0ea900709f9c7ddd2f832aaf841afa5b00.jpg b/data/2025/2504_10xxx/2504.10685/images/fe1b95b5306e4a2471bb335e8e5c2e0ea900709f9c7ddd2f832aaf841afa5b00.jpg new file mode 100644 index 0000000000000000000000000000000000000000..907e674ad35189f1bbdff9fc105de3ccc1e8953e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/images/fe1b95b5306e4a2471bb335e8e5c2e0ea900709f9c7ddd2f832aaf841afa5b00.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5857217d05aeb42f27366e855919142e72c5863cea093cc355da447a7e9c848 +size 4252 diff --git a/data/2025/2504_10xxx/2504.10685/layout.json b/data/2025/2504_10xxx/2504.10685/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a6726ee8c5fd17bf70628efb36810e861f951e94 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10685/layout.json @@ -0,0 +1,22458 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 91, + 102, + 519, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 102, + 519, + 138 + ], + "spans": [ + { + "bbox": [ + 91, + 102, + 519, + 138 + ], + "type": "text", + "content": "NTIRE 2025 Challenge on Cross-Domain Few-Shot Object Detection: Methods and Results" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 91, + 162, + 530, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 162, + 530, + 318 + ], + "spans": [ + { + "bbox": [ + 91, + 162, + 530, + 318 + ], + "type": "text", + "content": "Yuqian Fu\\* Xingyu Qiu\\* Bin Ren\\* Yanwei Fu\\* Radu Timofte\\* Nicu Sebe\\* Ming-Hsuan Yang\\* Luc Van Gool\\* Kaijin Zhang Qingpeng Nong Xiugang Dong Hong Gao Xiangsheng Zhou Jiancheng Pan Yanxing Liu Xiao He Jiahao Li Yuze Sun Xiaomeng Huang Zhenyu Zhang Ran Ma Yuhan Liu Zijian Zhuang Shuai Yi Yixiong Zou Lingyi Hong Mingxi Chen 
Runze Li Xingdong Sheng Wenqiang Zhang Weisen Chen Yongxin Yan Xinguo Chen Yuanjie Shao Zhengrong Zuo Nong Sang Hao Wu Haoran Sun Shuming Hu Yan Zhang Zhiguang Shi Yu Zhang Chao Chen Tao Wang Da Feng Linhai Zhuo Ziming Lin Yali Huang Jie Me Yiming Yang Mi Guo Mingyuan Jiu Mingliang Xu Maomao Xiong Qunshu Zhang Xinyu Cao Yuqing Yang Dianmo Sheng Xuanpu Zhao Zhiyu Li Xuyang Ding Wenqian Li" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 152, + 342, + 200, + 355 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 342, + 200, + 355 + ], + "spans": [ + { + "bbox": [ + 152, + 342, + 200, + 355 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 368, + 297, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 368, + 297, + 559 + ], + "spans": [ + { + "bbox": [ + 54, + 368, + 297, + 559 + ], + "type": "text", + "content": "Cross-Domain Few-Shot Object Detection (CD-FSOD) poses significant challenges to existing object detection and few-shot detection models when applied across domains. In conjunction with NTIRE 2025, we organized the 1st CD-FSOD Challenge, aiming to advance the performance of current object detectors on entirely novel target domains with only limited labeled data. The challenge attracted 152 registered participants, received submissions from 42 teams, and concluded with 13 teams making valid final submissions. Participants approached the task from diverse perspectives, proposing novel models that achieved new state-of-the-art (SOTA) results under both open-source and closed-source settings. In this report, we present an overview of the 1st NTIRE 2025 CD-FSOD Challenge, highlighting the proposed solutions and summarizing the results submitted by the participants." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 581, + 135, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 581, + 135, + 594 + ], + "spans": [ + { + "bbox": [ + 55, + 581, + 135, + 594 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 601, + 295, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 601, + 295, + 650 + ], + "spans": [ + { + "bbox": [ + 55, + 601, + 295, + 650 + ], + "type": "text", + "content": "Few-shot object detection (FSOD) [28] aims at allowing models to detect novel objects using minimal labeled examples. While significant progress has been made, existing FSOD methods [53, 63, 64, 68, 75, 85] typically as" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 344, + 555, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 344, + 555, + 476 + ], + "spans": [ + { + "bbox": [ + 313, + 344, + 555, + 476 + ], + "type": "text", + "content": "sume that the training (source) and testing (target) data are drawn from the same domain. However, this assumption rarely holds in real-world applications. For instance, a model trained on natural images such as those in MS-COCO [41] may face substantial challenges when applied to a novel domain like remote sensing imagery. This cross-domain few-shot learning (CD-FSL) problem has attracted considerable attention in the context of classification [12-14, 18, 36, 55, 56, 71, 72, 83, 84, 86, 87], whereas its extension to object detection—i.e., cross-domain few-shot object detection (CD-FSOD)—remains much less explored." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 480, + 556, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 480, + 556, + 601 + ], + "spans": [ + { + "bbox": [ + 313, + 480, + 556, + 601 + ], + "type": "text", + "content": "Upon gaping at this gap, one recent work, CD-ViTO [15], reveals that the different object detection datasets exhibit various characters in style, inter-class variance (ICV), and indefinable boundaries (IB). To further investigate how these factors affect the CD-FSOD, CD-ViTO thus proposes a new benchmark which takes MS-COCO as the source domain and six distinct datasets with diverse style, ICV, IB as unseen targets. Results indicate that the prior detectors all fail to generalize to those targets when the domain gap issue is observed." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 605, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 605, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 605, + 556, + 715 + ], + "type": "text", + "content": "To further promote the advances on CD-FSOD, we newly introduce three more unseen targets, DeepFruits [60], Carpk [20], and CarDD [76] as testbeds for the CD-FSOD detectors. Following the observations in CD-ViTO, these three targets have domains different from the source data, with varying styles, ICV, and IB. 
Furthermore, to maximally boost the performance of models, we define the task setting proposed in CD-ViTO as closed-source CD-FSOD, while further introducing the new open-source CD-FSOD" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.10685v1 [cs.CV] 14 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 654, + 295, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 654, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 654, + 295, + 713 + ], + "type": "text", + "content": "* Yuqian Fu, Xingyu Qiu, Bin Ren, Yanwei Fu, Radu Timofte, Nicu Sebe, Ming-Hsuan Yang, and Luc Van Gool are the NTIRE2025 challenge organizers. The other authors are participants in this challenge. Appendix A contains the authors' team names and affiliations. NTIRE2025 webpage: https://cvlai.net/ntire/2025/. Challenge Codes: https://github.com/lovelyqian/NTIRE2025_CDFSOD." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 144 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 144 + ], + "type": "text", + "content": "setting. 
To be specific, the closed-source setting means the source data for model training is strictly limited, e.g., MS-COCO as in CD-ViTO; while the open-source setting relaxes this limitation and allows the participants to leverage diverse knowledge sources and foundation models to explore the upper bound on the target domains." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 144, + 294, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 144, + 294, + 300 + ], + "spans": [ + { + "bbox": [ + 55, + 144, + 294, + 300 + ], + "type": "text", + "content": "In collaboration with the 2025 New Trends in Image Restoration and Enhancement (NTIRE 2025) Workshop, which is particularly interested in the model robustness under changing conditions, we present the 1st CD-FSOD Challenge. It features an open-source CD-FSOD as the main track and a closed-source CD-FSOD as a special track. For the closed-source track, MS-COCO serves as the sole source domain. The validation phase includes six target domains proposed in CD-ViTO. Three additional novel domains are used as the final test sets for both tracks. Mean Average Precision (mAP) is employed as the ranking metric. We believe this challenge will drive progress in the CD-FSOD field and foster meaningful algorithmic innovations." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 300, + 295, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 300, + 295, + 515 + ], + "spans": [ + { + "bbox": [ + 55, + 300, + 295, + 515 + ], + "type": "text", + "content": "This challenge is one of the NTIRE " + }, + { + "bbox": [ + 55, + 300, + 295, + 515 + ], + "type": "inline_equation", + "content": "2025^{1}" + }, + { + "bbox": [ + 55, + 300, + 295, + 515 + ], + "type": "text", + "content": " Workshop associated challenges on: ambient lighting normalization [74], reflection removal in the wild [81], shadow removal [73], event-based image deblurring [69], image denoising [70], XGC quality assessment [44], UGC video enhancement [61], night photography rendering [9], image super-resolution (x4) [3], real-world face restoration [4], efficient super-resolution [57], HR depth estimation [82], efficient burst HDR and restoration [32], cross-domain few-shot object detection [16], short-form UGC video quality assessment and enhancement [38, 39], text to image generation model quality assessment [19], day and night raindrop removal for dual-focused images [37], video quality assessment for video conferencing [24], low light image enhancement [45], light field super-resolution [77], restore any image model (RAIM) in the wild [40], raw restoration and super-resolution [5], and raw reconstruction from RGB on smartphones [6]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 525, + 246, + 539 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 525, + 246, + 539 + ], + "spans": [ + { + "bbox": [ + 55, + 525, + 246, + 539 + ], + "type": "text", + "content": "2. 
NTIRE 2025 CD-FSOD Challenge" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 545, + 173, + 558 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 545, + 173, + 558 + ], + "spans": [ + { + "bbox": [ + 55, + 545, + 173, + 558 + ], + "type": "text", + "content": "2.1. Challenge Overview" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 563, + 295, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 563, + 295, + 694 + ], + "spans": [ + { + "bbox": [ + 55, + 563, + 295, + 694 + ], + "type": "text", + "content": "Our challenge aims to advance Cross-Domain Few-Shot Object Detection (CD-FSOD) — detecting objects under domain shifts with limited labeled data. We use six previously published target domains [15] as validation sets and introduce three newly constructed datasets for final testing. Beyond the dataset update, we introduce open-source CD-FSOD as a new setting, allowing participants to freely choose source datasets and pre-trained models to enhance generalization. Fig. 1 illustrates both the predefined closed-source CD-FSOD and the new open-source CD-FSOD settings, along with the newly introduced target domains." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 314, + 72, + 425, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 425, + 83 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 425, + 83 + ], + "type": "text", + "content": "2.2. Task Formulations" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "spans": [ + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "text", + "content": "Closed-Source CD-FSOD. 
Given a source dataset " + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_S" + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "text", + "content": " and a novel target dataset " + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_T" + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "text", + "content": ", the closed-source CD-FSOD track assumes that the source class set " + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_S" + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "text", + "content": " and the target class set " + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_T" + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "text", + "content": " are completely disjoint, i.e., " + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_S \\cap \\mathcal{C}_T = \\emptyset" + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "text", + "content": ". Additionally, the distributions of the source domain " + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_S" + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "text", + "content": " and the target domain " + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_T" + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "text", + "content": " are not identical. 
Participants are required to train models on " + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_S" + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "text", + "content": " and test them on " + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_T" + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "text", + "content": ", where each class in " + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_T" + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "text", + "content": " has only a few labeled examples. Usually, " + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_S" + }, + { + "bbox": [ + 313, + 89, + 553, + 220 + ], + "type": "text", + "content": " is a single dataset, as in CD-ViTO [15]. We refer to this setting as closed-source CD-FSOD to differentiate it from the open-source variant." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 221, + 553, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 221, + 553, + 376 + ], + "spans": [ + { + "bbox": [ + 313, + 221, + 553, + 376 + ], + "type": "text", + "content": "Open-Source CD-FSOD. In contrast to the closed-source setting where training data is strictly limited, the open-source CD-FSOD track is designed to leverage the capabilities of foundation models. Since these models are pretrained on large-scale and diverse datasets, it is practically hard to trace all the knowledge embedded within them. Hence, we refer to this setting as open-source. While the relaxed constraints on source data make it difficult to strictly ensure non-overlapping classes between the source and target data, the track still focuses on addressing the core challenges of domain shift and few-shot object detection. 
We believe this setting will significantly accelerate the development of CD-FSOD methods for real-world applications." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 377, + 553, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 377, + 553, + 436 + ], + "spans": [ + { + "bbox": [ + 313, + 377, + 553, + 436 + ], + "type": "text", + "content": "In this challenge, the open-source CD-FSOD is designated as the main track, with awards presented to the top three teams. The closed-source CD-FSOD serves as the special track, with a single award granted to the top-performing team." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "spans": [ + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "text", + "content": "-way " + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "text", + "content": "-shot Protocol. We adopt the " + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "text", + "content": "-way " + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "text", + "content": "-shot evaluation protocol. 
For each novel class in the target class set " + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_T" + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "text", + "content": " labeled instances are provided, forming the support set " + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "text", + "content": ". The remaining unlabeled instances constitute the query set " + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "text", + "content": ". Instances contained in the support set " + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "text", + "content": " are used to assist the model in recognizing and detecting the objects in " + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 528, + 481, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 528, + 481, + 540 + ], + "spans": [ + { + "bbox": [ + 314, + 528, + 481, + 540 + ], + "type": "text", + "content": "2.3. 
Challenge Phases and Datasets" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 545, + 553, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 545, + 553, + 605 + ], + "spans": [ + { + "bbox": [ + 313, + 545, + 553, + 605 + ], + "type": "text", + "content": "This challenge involves one development stage and one testing stage. The source data " + }, + { + "bbox": [ + 313, + 545, + 553, + 605 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_S" + }, + { + "bbox": [ + 313, + 545, + 553, + 605 + ], + "type": "text", + "content": " for both stages is the same, i.e., MS-COCO [41] for the closed-source track and unlimited data for the open-source track. While the testing data " + }, + { + "bbox": [ + 313, + 545, + 553, + 605 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_T" + }, + { + "bbox": [ + 313, + 545, + 553, + 605 + ], + "type": "text", + "content": " is different." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 605, + 553, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 605, + 553, + 654 + ], + "spans": [ + { + "bbox": [ + 313, + 605, + 553, + 654 + ], + "type": "text", + "content": "Development Stage: Datasets proposed in the CD-ViTO, including ArTaxOr [8], Clipart1K [23], DIOR [34], Deep-Fish [62], NEU-DET [67], and UODD [26] are taken as targets " + }, + { + "bbox": [ + 313, + 605, + 553, + 654 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_T" + }, + { + "bbox": [ + 313, + 605, + 553, + 654 + ], + "type": "text", + "content": " during development stage." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 654, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 553, + 713 + ], + "type": "text", + "content": "Testing Stage. 
Three previously unseen datasets (DeepFruits [60], Carpk [20], and CarDD [76]) are introduced and used as the targets " + }, + { + "bbox": [ + 313, + 654, + 553, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_T" + }, + { + "bbox": [ + 313, + 654, + 553, + 713 + ], + "type": "text", + "content": " for the final testing phase. Note that the ground truth annotations for these query sets are held exclusively by the challenge organizers." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 66, + 702, + 231, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 702, + 231, + 712 + ], + "spans": [ + { + "bbox": [ + 66, + 702, + 231, + 712 + ], + "type": "text", + "content": "1https://www.cvlai.net/ntire/2025/" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 75, + 553, + 214 + ], + "blocks": [ + { + "bbox": [ + 56, + 75, + 553, + 214 + ], + "lines": [ + { + "bbox": [ + 56, + 75, + 553, + 214 + ], + "spans": [ + { + "bbox": [ + 56, + 75, + 553, + 214 + ], + "type": "image", + "image_path": "1bc0d3ab7ab85fb2208d5f61d937c337f5fbc4fba1fc2687c691f111074cfeb2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 226, + 555, + 251 + ], + "lines": [ + { + "bbox": [ + 54, + 226, + 555, + 251 + ], + "spans": [ + { + "bbox": [ + 54, + 226, + 555, + 251 + ], + "type": "text", + "content": "Figure 1. Illustration of the challenge settings, including the closed-source and open-source CD-FSOD tracks. 
The three newly introduced target datasets used in the final testing phase are also shown." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 269, + 200, + 282 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 269, + 200, + 282 + ], + "spans": [ + { + "bbox": [ + 55, + 269, + 200, + 282 + ], + "type": "text", + "content": "2.4. CD-ViTO Baseline Model" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 287, + 296, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 287, + 296, + 384 + ], + "spans": [ + { + "bbox": [ + 55, + 287, + 296, + 384 + ], + "type": "text", + "content": "We take CD-ViTO, the current State-of-the-art (SOTA) method under the closed-source setting, as the baseline for this challenge. Briefly, CD-ViTO is built upon DE-ViT [85], an open-set detector, and fine-tuned using the support set. As in Fig. 2, modules in blue are inherited from DE-ViT, while modules in orange are newly proposed. New improvements include learnable instance features, instance reweighting, domain prompter, and finetuning." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 58, + 393, + 296, + 500 + ], + "blocks": [ + { + "bbox": [ + 58, + 393, + 296, + 500 + ], + "lines": [ + { + "bbox": [ + 58, + 393, + 296, + 500 + ], + "spans": [ + { + "bbox": [ + 58, + 393, + 296, + 500 + ], + "type": "image", + "image_path": "db3a8fdfeee41c36fdb097170cdd5cbd99260e8264a29ec9a9b48b94f98c62f1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 65, + 510, + 282, + 522 + ], + "lines": [ + { + "bbox": [ + 65, + 510, + 282, + 522 + ], + "spans": [ + { + "bbox": [ + 65, + 510, + 282, + 522 + ], + "type": "text", + "content": "Figure 2. Overall framework of CD-ViTO baseline method." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 534, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 534, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 534, + 296, + 713 + ], + "type": "text", + "content": "Intuitively, the learnable instance feature module is designed to enhance inter-class variance (ICV) among different target classes by making the initially fixed instance features learnable and optimizing them through supervised few-shot detection tasks on the target support set. The instance reweighting module further improves prototype quality by assigning higher weights to high-quality object instances—e.g., those with minimal indefinable boundary (IB). These weights are learned via a lightweight MLP and fully connected layer, as illustrated in the upper part of Fig. 2(b). The domain prompter module introduces learnable domain perturbations to simulate varying domain styles. These perturbations are applied to object prototypes, followed by a prototype consistency loss to ensure that the introduced perturbations do not affect the seman-" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 270, + 555, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 270, + 555, + 368 + ], + "spans": [ + { + "bbox": [ + 313, + 270, + 555, + 368 + ], + "type": "text", + "content": "tic category of the prototypes. Simultaneously, a domain diversity loss encourages the generated domains to be sufficiently diverse. The lower part of Fig. 2(b) illustrates this mechanism. By injecting virtual domains and enforcing robustness against the induced perturbations, this strategy enhances the model's generalization under domain shifts. Finetuning is applied to the modules highlighted with fire icons in Fig. 2." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 383, + 430, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 383, + 430, + 396 + ], + "spans": [ + { + "bbox": [ + 313, + 383, + 430, + 396 + ], + "type": "text", + "content": "2.5. Evaluation Protocol" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 404, + 555, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 404, + 555, + 513 + ], + "spans": [ + { + "bbox": [ + 313, + 404, + 555, + 513 + ], + "type": "text", + "content": "The final score is measured based on the model's performance on the three datasets of the testing stage. For each dataset, we validate the models on three different few-shot settings: 1-shot, 5-shot, and 10-shot. This results in a total of nine mean Average Precision (mAP) scores: D1_1shot, D1_5shot, D1_10shot; D2_1shot, D2_5shot, D2_10shot; and D3_1shot, D3_5shot, D3_10shot. The D1, D2, D3 denote the Deep-Fruits, Carpk, and CarDD, respectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 515, + 555, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 515, + 555, + 540 + ], + "spans": [ + { + "bbox": [ + 313, + 515, + 555, + 540 + ], + "type": "text", + "content": "The final ranking score is computed as a weighted average avg() of these scores:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 555, + 561, + 597 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 555, + 561, + 597 + ], + "spans": [ + { + "bbox": [ + 313, + 555, + 561, + 597 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\text {S c o r e} = 2 * \\text {a v g} (\\mathrm {D} 1 _ {-} 1 \\text {s h o t}, \\mathrm {D} 2 _ {-} 1 \\text {s h o t}, \\mathrm {D} 3 _ {-} 1 \\text {s h o t}) \\\\ + 1 * a v g (D 1 \\_ 5 s h o t, D 2 \\_ 5 s h o t, D 3 \\_ 5 s h o t) \\\\ + 1 * a v g (D 1. 1 0 s h o t, D 2. 1 0 s h o t, D 3. 
1 0 s h o t) \\\\ \\end{array}", + "image_path": "4f59d8b52cddbabd2debd7634615c8b6f658ffbcf42f7cee12d224037c00c21e.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "text", + "content": "Rationale for Weighted Scoring. We assign a higher weight " + }, + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "inline_equation", + "content": "(\\times 2)" + }, + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "text", + "content": " to the 1-shot setting for two primary reasons: (1) Performance in the 1-shot scenario is generally lower than in the 5-shot and 10-shot settings due to the limited availability of labeled examples for adaptation; and (2) emphasizing 1-shot performance encourages the development of models that are more robust and effective in extremely low-data conditions." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 101, + 555, + 272 + ], + "blocks": [ + { + "bbox": [ + 55, + 70, + 555, + 94 + ], + "lines": [ + { + "bbox": [ + 55, + 70, + 555, + 94 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 555, + 94 + ], + "type": "text", + "content": "Table 1. Open-source and closed-source results on CD-FSOD. D1, D2, and D3 represent DeepFruits, CARPK, and CarDD, respectively. Mean Average Precision (mAP) on 1-shot, 5-shot, and 10-shot are reported. Teams achieving top results are highlighted." 
+ } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 101, + 555, + 272 + ], + "lines": [ + { + "bbox": [ + 58, + 101, + 555, + 272 + ], + "spans": [ + { + "bbox": [ + 58, + 101, + 555, + 272 + ], + "type": "table", + "html": "
Main Open-Source Track
RankTeam NameScoreD1_1shotD1_5shotD1_10shotD2_1shotD2_5shotD2_10shotD3_1shotD3_5shotD3_10shot
1MoveFree231.0166.1864.5862.5760.4358.8959.0048.7549.2848.00
2AI4EarthLab215.9261.1965.4165.3559.1558.0559.0034.2143.8547.00
3IDCFS215.4863.3465.4164.7561.1460.4260.0032.3339.2443.00
4FDUROILab_Lenovo211.5561.2562.8964.6659.2459.2459.0035.1337.6340.00
5HUSTLab210.7863.7161.3257.1960.4260.4760.0031.0140.0943.00
6TongjiLab172.1442.3641.9041.7455.9555.9555.0031.4031.4031.00
7Manifold159.8632.0544.2844.2757.0657.0657.0018.7129.3432.00
8MXT108.2022.2640.5741.3421.1226.3430.2323.8128.0029.00
Special Closed-Source Track
RankTeam NameScoreD1_1shotD1_5shotD1_10shotD2_1shotD2_5shotD2_10shotD3_1shotD3_5shotD3_10shot
1X-Few125.9036.5846.9550.9823.0129.6828.0020.1129.6833.00
2MM117.3932.4745.2350.2318.8329.3628.0018.3129.1431.00
3FSV112.8131.2343.8949.3213.6926.0426.5919.7130.1633.17
4IPC105.6232.5847.1245.6413.4120.7713.0018.1829.9832.00
5LJY105.2833.5246.0445.3410.6811.4525.0018.3430.9432.00
/CD-ViTO Base [15]91.0027.9537.4243.586.7721.2824.0010.0726.4730.00
", + "image_path": "e6bd5a1a7ac2872e0e9ebd77446c8317fcdc5f074f12be87eed64ee98bdfa6ce.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 289, + 163, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 289, + 163, + 304 + ], + "spans": [ + { + "bbox": [ + 55, + 289, + 163, + 304 + ], + "type": "text", + "content": "3. Challenge Results" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 311, + 295, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 311, + 295, + 382 + ], + "spans": [ + { + "bbox": [ + 55, + 311, + 295, + 382 + ], + "type": "text", + "content": "Among the 152 registered participants, 8 and 5 teams have participated the final testing stage and submitted their results, codes, and factsheets. Table. 1 summarizes the results of these methods. Detailed descriptions of the participants' solutions are provided in Sec.4 and Sec.5, each corresponding to a different track." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 385, + 295, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 385, + 295, + 479 + ], + "spans": [ + { + "bbox": [ + 55, + 385, + 295, + 479 + ], + "type": "text", + "content": "Open-Source Track Results. In the open-source track, nearly all participating teams achieved strong performance with clear improvements over the provided CD-ViTO baseline. This highlights not only the effectiveness of their proposed methods but also the significance of introducing this new task setting. As observed, relaxing the strict limitation on the source data offers a substantial advantage in tackling the CD-FSOD task." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 483, + 295, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 483, + 295, + 542 + ], + "spans": [ + { + "bbox": [ + 55, + 483, + 295, + 542 + ], + "type": "text", + "content": "Specifically, the teams MoveFree, AI4EarthLab, and IDCFS emerged as the top performers in this track, achieving scores of 231.01, 215.92, and 215.48, respectively—significantly surpassing the baseline and other teams under the same track." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 544, + 295, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 544, + 295, + 616 + ], + "spans": [ + { + "bbox": [ + 55, + 544, + 295, + 616 + ], + "type": "text", + "content": "Closed-Source Track Results. The performance achieved by the closed-source track teams is generally lower than that of the open-source track. This is quite understandable considering that the closed-source track enforces stricter constraints. Nevertheless, the participants managed to improve the baseline method clearly." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 618, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 618, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 618, + 295, + 714 + ], + "type": "text", + "content": "In particular, the X-Few team stands out with a final score of 125.90, significantly outperforming other competitors. This shows that well-designed architectures and training strategies can still bring notable gains even without relying on large external models. Other teams in this track also delivered solid improvements. Their contributions are valuable in terms of enabling fair comparisons and emphasizing algorithmic annotations." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 289, + 508, + 303 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 289, + 508, + 303 + ], + "spans": [ + { + "bbox": [ + 313, + 289, + 508, + 303 + ], + "type": "text", + "content": "4. Main Open-Source Track Methods" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 309, + 384, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 309, + 384, + 320 + ], + "spans": [ + { + "bbox": [ + 313, + 309, + 384, + 320 + ], + "type": "text", + "content": "4.1. MoveFree" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 328, + 418, + 340 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 328, + 418, + 340 + ], + "spans": [ + { + "bbox": [ + 313, + 328, + 418, + 340 + ], + "type": "text", + "content": "4.1.1. Proposed Method" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 343, + 555, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 343, + 555, + 488 + ], + "spans": [ + { + "bbox": [ + 313, + 343, + 555, + 488 + ], + "type": "text", + "content": "Open-set object detectors, such as [35], [43], and [58], are designed to detect objects based on arbitrary text descriptions. These models are typically pre-trained on large-scale, well-annotated datasets, ensuring strong alignment between textual and visual modalities. As a result, they exhibit remarkable zero-shot capabilities, allowing them to recognize and localize unseen object categories based solely on textual prompts. Given the strong generalization ability of such open-set detectors, this team believes that they are inherently well-suited for cross-domain few-shot object detection, as their robust pre-trained representations can be effectively adapted to new domains with minimal supervision." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 488, + 556, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 488, + 556, + 619 + ], + "spans": [ + { + "bbox": [ + 313, + 488, + 556, + 619 + ], + "type": "text", + "content": "Thus, the MoveFree team focuses on leveraging and enhancing pre-trained open-set object detectors for CD-FSOD during the fine-tuning stage. The proposed approach introduces three key improvements: (1) To address the issue of missing annotations, self-training is introduced to iteratively refine the training data, thereby enhancing fine-tuning performance. (2) A Mixture-of-Experts (MoE) architecture is integrated into the open-set object detector to improve adaptability and robustness in the few-shot setting. (3) A two-stage fine-tuning pipeline is designed carefully. Code is made available2." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 620, + 556, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 620, + 556, + 693 + ], + "spans": [ + { + "bbox": [ + 313, + 620, + 556, + 693 + ], + "type": "text", + "content": "Self-training Paradigm. According to the definition of few-shot object detection in CD-ViTO[15], " + }, + { + "bbox": [ + 313, + 620, + 556, + 693 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 313, + 620, + 556, + 693 + ], + "type": "text", + "content": "-shot object detection refers to having " + }, + { + "bbox": [ + 313, + 620, + 556, + 693 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 313, + 620, + 556, + 693 + ], + "type": "text", + "content": " labeled instances in the training data, rather than " + }, + { + "bbox": [ + 313, + 620, + 556, + 693 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 313, + 620, + 556, + 693 + ], + "type": "text", + "content": " fully annotated images. 
This implies that instances of target categories may lack annotations in the provided training set." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 324, + 702, + 528, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 702, + 528, + 713 + ], + "spans": [ + { + "bbox": [ + 324, + 702, + 528, + 713 + ], + "type": "text", + "content": "2https://github.com/KAIJINZ228/Few_Shot_GD" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 275 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 275 + ], + "type": "text", + "content": "Upon careful investigation, this team identified that the issue of incomplete annotations is prominent across all three test datasets in this challenge. Drawing on their expertise in developing open-set object detectors, the team recognized that missing annotations for target categories can significantly degrade model performance. This degradation occurs because the loss function penalizes the model for correctly detecting unannotated objects, mistakenly treating them as false positives due to their absence in the ground truth labels. Therefore, this team employs a self-training strategy during the fine-tuning stage of Grounding DINO to iteratively refine the annotations in the training data. Specifically, Grounding DINO periodically generates predictions on the training set, which are then incorporated as additional annotations. 
This iterative process gradually improves the quality of the training data, ultimately leading to enhanced model performance." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 275, + 296, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 275, + 296, + 371 + ], + "spans": [ + { + "bbox": [ + 55, + 275, + 296, + 371 + ], + "type": "text", + "content": "The substitution of the Mixture-of-Experts (MoE). In few-shot object detection, the availability of training data is highly limited. Therefore, maximizing the object detector's ability to extract supervision from this scarce data is crucial during the fine-tuning stage. In this challenge, beyond the few-shot constraint, the cross-domain setting further increases the difficulty, as detectors usually require additional supervision to effectively adapt to a new domain." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 371, + 296, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 371, + 296, + 467 + ], + "spans": [ + { + "bbox": [ + 55, + 371, + 296, + 467 + ], + "type": "text", + "content": "The core concept of the MoE architecture is to enable different components (i.e., experts) of a model to specialize in different aspects of the data [2]. In recent years, MoE has gained popularity in multi-modal models, including Mistral [25] and DeepSeek-V2 [42]. A common application of MoE in such models is replacing the traditional feedforward network (FFN) with an MoE-based variant, as seen in Switch Transformer [10] and OpenMoe [80]." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 467, + 296, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 467, + 296, + 586 + ], + "spans": [ + { + "bbox": [ + 55, + 467, + 296, + 586 + ], + "type": "text", + "content": "To maximize supervision and enable the model to learn effectively from the limited training data, this team integrates a Mixture-of-Experts (MoE) mechanism into Grounding DINO during the fine-tuning stage. The MoE framework allows different experts to specialize in distinct aspects of the data, facilitating the capture of more diverse and informative representations. It is hypothesized that this capability helps Grounding DINO better adapt to the target domain while making more efficient use of the available training data." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 586, + 296, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 586, + 296, + 647 + ], + "spans": [ + { + "bbox": [ + 55, + 586, + 296, + 647 + ], + "type": "text", + "content": "In this team's approach, the MoE mechanism is incorporated into the feed-forward network (FFN) layers of Grounding DINO's Cross-Modality Decoder. As illustrated in Figure 3, the MoE architecture consists of one shared expert and three router-selected experts." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 651, + 153, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 651, + 153, + 662 + ], + "spans": [ + { + "bbox": [ + 55, + 651, + 153, + 662 + ], + "type": "text", + "content": "4.1.2. Training Details" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 666, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 666, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 666, + 296, + 713 + ], + "type": "text", + "content": "A two-stage fine-tuning pipeline is adopted to adapt Grounding DINO for cross-domain few-shot object detection. 
In the first stage, the standard Grounding DINO (without the MoE substitution) is fine-tuned on the training data," + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 353, + 101, + 544, + 258 + ], + "blocks": [ + { + "bbox": [ + 353, + 101, + 544, + 258 + ], + "lines": [ + { + "bbox": [ + 353, + 101, + 544, + 258 + ], + "spans": [ + { + "bbox": [ + 353, + 101, + 544, + 258 + ], + "type": "image", + "image_path": "0b7da84e2775e9ddbfcbf86d3acb6b3e314368a8cf2dd3dcc8102d0f04f792a6.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 353, + 263, + 539, + 396 + ], + "blocks": [ + { + "bbox": [ + 353, + 263, + 539, + 396 + ], + "lines": [ + { + "bbox": [ + 353, + 263, + 539, + 396 + ], + "spans": [ + { + "bbox": [ + 353, + 263, + 539, + 396 + ], + "type": "image", + "image_path": "e99f2e71e525eaa1944ac9b617b933a5aa42cf899e9ed92b704bb56a99e68e67.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 417, + 555, + 440 + ], + "lines": [ + { + "bbox": [ + 313, + 417, + 555, + 440 + ], + "spans": [ + { + "bbox": [ + 313, + 417, + 555, + 440 + ], + "type": "text", + "content": "Figure 3. Team MoveFree: an illustration of the substitution of MoE into Grounding DINO's decoder layers." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 461, + 555, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 461, + 555, + 497 + ], + "spans": [ + { + "bbox": [ + 313, + 461, + 555, + 497 + ], + "type": "text", + "content": "with all parameters trainable except for the language encoder. In the second stage, the MoE architecture is introduced into the model." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 498, + 556, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 556, + 629 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 556, + 629 + ], + "type": "text", + "content": "For the second stage, the model is initialized using the weights obtained from the first stage, excluding the MoE components. The shared expert within the MoE is initialized with weights from the first stage, while the three router-selected experts are initialized using the open-source pre-trained weights of Grounding DINO. This initialization strategy facilitates effective learning from limited training data while retaining knowledge acquired during the initial stage. During this phase, only the MoE components and the detection head remain trainable, with all other parts of the model kept frozen." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 630, + 556, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 630, + 556, + 712 + ], + "spans": [ + { + "bbox": [ + 313, + 630, + 556, + 712 + ], + "type": "text", + "content": "Additionally, the self-supervised learning paradigm is applied in both stages to iteratively refine the training data and enhance performance. The training strictly adheres to the provided few-shot training set, without utilizing any external data. The overall approach is computationally efficient and can be executed on a single V100 GPU within a reasonable time frame." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 143, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 143, + 83 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 143, + 83 + ], + "type": "text", + "content": "4.2.AI4EarthLab" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 89, + 160, + 101 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 89, + 160, + 101 + ], + "spans": [ + { + "bbox": [ + 55, + 89, + 160, + 101 + ], + "type": "text", + "content": "4.2.1. Proposed Method" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 104, + 295, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 104, + 295, + 175 + ], + "spans": [ + { + "bbox": [ + 55, + 104, + 295, + 175 + ], + "type": "text", + "content": "Foundation models pretrained on large-scale datasets, such as GroundingDINO [43] and LAE-DINO [51], have demonstrated strong detection performance in cross-domain zero-shot and few-shot object detection tasks. Thus, the AI4EarthLab team is motivated to explore such foundation models for CD-FSOD." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 177, + 296, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 177, + 296, + 271 + ], + "spans": [ + { + "bbox": [ + 55, + 177, + 296, + 271 + ], + "type": "text", + "content": "As shown in Fig. 4, this team proposes an augmentation-search strategy for CD-FSOD, which leverages open-source data and transfers the model to novel target domains. 
Following the approaches in [15, 52], an efficient fine-tuning method is adopted to explore the cross-domain few-shot detection capabilities of foundation models, requiring only lightweight tuning to identify effective subfields. Code is made available3." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 56, + 281, + 294, + 373 + ], + "blocks": [ + { + "bbox": [ + 56, + 281, + 294, + 373 + ], + "lines": [ + { + "bbox": [ + 56, + 281, + 294, + 373 + ], + "spans": [ + { + "bbox": [ + 56, + 281, + 294, + 373 + ], + "type": "image", + "image_path": "43d8c11f2a43152c39a45bf877db09a41ebc256d1d670b340e4ee5df4386d35a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 383, + 296, + 415 + ], + "lines": [ + { + "bbox": [ + 55, + 383, + 296, + 415 + ], + "spans": [ + { + "bbox": [ + 55, + 383, + 296, + 415 + ], + "type": "text", + "content": "Figure 4. Team AI4EarthLab: overall framework of augmentation-search strategy Enhance Then Search (ETS) with foundation model for CD-FSOD." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 432, + 295, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 432, + 295, + 624 + ], + "spans": [ + { + "bbox": [ + 54, + 432, + 295, + 624 + ], + "type": "text", + "content": "Data augmentation has proven effective in reducing semantic confusion during few-shot fine-tuning, particularly in cases where categories—such as certain fruits—are visually and semantically similar. Through extensive few-shot experiments, it is observed that integrating image-based augmentation with optimal domain search strategies can further enhance the performance of foundation models, though their upper performance bound remains uncertain. 
Building upon the open-source Grounding DINO framework, several commonly used image augmentation techniques are incorporated, and specific optimization objectives are defined to efficiently search for optimal subdomains within a broad domain space. This strategy facilitates more effective few-shot object detection. The proposed augmentation-search strategy consists of the following steps:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 623, + 295, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 623, + 295, + 696 + ], + "spans": [ + { + "bbox": [ + 55, + 623, + 295, + 696 + ], + "type": "text", + "content": "Step 1: Select the foundation model. This team adopts the Swin-B version of GroundingDINO as the foundation model, because of its best performance within the open-source model. This model has been pre-trained on a diverse set of large-scale datasets, including COCO, Objects365 (O365), GoldG, Cap4M, OpenImages, ODinW-35," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 553, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 107 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 107 + ], + "type": "text", + "content": "and RefCOCO, which collectively provide strong generalization capabilities across multiple vision-language grounding tasks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 108, + 555, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 108, + 555, + 263 + ], + "spans": [ + { + "bbox": [ + 313, + 108, + 555, + 263 + ], + "type": "text", + "content": "Step 2: Build a combined image augmentation pipeline. To improve the model's adaptability to various subdomains under limited data scenarios, this team construct a composite image augmentation pipeline. 
This pipeline randomly applies a combination of augmentation techniques such as CachedMosaic, YOLOXHSVRandomAug, RandomFlip, CachedMixUp, RandomResize, and RandomCrop. These methods are designed to enhance sample diversity, simulate domain shifts, and improve the model's robustness during fine-tuning. Additional data augmentation techniques, such as Copy-Paste, are also evaluated. However, these methods are found to introduce greater instability during few-shot fine-tuning." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 264, + 554, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 264, + 554, + 346 + ], + "spans": [ + { + "bbox": [ + 313, + 264, + 554, + 346 + ], + "type": "text", + "content": "Step 3: Construct an optimized target domain validation set. To evaluate adaptation performance, a subset of the annotated test data is sampled and used as a validation set. Rather than employing full annotations, coarse-grained labeling is applied to provide sufficient supervision for hyperparameter tuning, while significantly reducing annotation costs in the target domain." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 347, + 554, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 347, + 554, + 419 + ], + "spans": [ + { + "bbox": [ + 313, + 347, + 554, + 419 + ], + "type": "text", + "content": "Step 4: Search for the best model parameters on the validation set. Hyperparameter search and model selection are conducted based on validation performance. This process involves tuning the learning rate, augmentation intensity, and other training configurations to determine the optimal setup for effective domain adaptation." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 419, + 553, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 419, + 553, + 467 + ], + "spans": [ + { + "bbox": [ + 313, + 419, + 553, + 467 + ], + "type": "text", + "content": "Step 5: Perform inference on the test set. Once the optimal configuration is identified, the fine-tuned model is applied to the held-out test set to evaluate its final performance on the target domain." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 472, + 411, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 472, + 411, + 483 + ], + "spans": [ + { + "bbox": [ + 313, + 472, + 411, + 483 + ], + "type": "text", + "content": "4.2.2. Training Details" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": "Experiments are conducted on eight NVIDIA A100 GPUs, executing " + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "inline_equation", + "content": "8 \\times 50" + }, + { + "bbox": [ + 313, + 486, + 555, + 713 + ], + "type": "text", + "content": " experiment groups per round. During training, the optimal step size is selected based on historical performance to accelerate the fine-tuning process. Learning rate schedules are adjusted using milestone epochs, typically set to 1, 5, and 9 depending on the fine-tuning setting. The model uses 900 queries by default and a maximum text token length of 256. A BERT-based text encoder with BPE tokenization is employed. Both the feature enhancer and cross-modality decoder consist of six layers, and deformable attention is adopted in the image cross-attention modules. The loss function comprises classification (or contrastive) loss, box L1 loss, and GIoU loss. 
Following the Grounding DINO framework, Hungarian matching weights are set to 2.0 (classification), 5.0 (L1), and 2.0 (GIoU), while the final loss weights are 1.0, 5.0, and 2.0, respectively. Although various hyperparameter configurations are also explored, their impact is found to be relatively minor compared to that of data augmentation strategies." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 66, + 702, + 231, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 702, + 231, + 712 + ], + "spans": [ + { + "bbox": [ + 66, + 702, + 231, + 712 + ], + "type": "text", + "content": "3https://github.com/jaychempan/ETS" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 111, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 111, + 83 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 111, + 83 + ], + "type": "text", + "content": "4.3. IDCFS" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 89, + 160, + 101 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 89, + 160, + 101 + ], + "spans": [ + { + "bbox": [ + 55, + 89, + 160, + 101 + ], + "type": "text", + "content": "4.3.1. Proposed Method" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 104, + 296, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 104, + 296, + 273 + ], + "spans": [ + { + "bbox": [ + 55, + 104, + 296, + 273 + ], + "type": "text", + "content": "The IDCFS team proposes a Pseudo-Label Driven Vision-Language Grounding method for CD-FSOD. 
As shown in Figure 5, the proposed method mainly combines large-scale foundation models with an iterative pseudo-labeling strategy. The GLIP [35] is being fine-tuned using three approaches, with the full model fine-tuned delivering the best results in most cases. To better exploit the support set, an iterative training strategy is proposed and applied, using high-confidence predictions as pseudo-labels to refine the model. Additionally, this team also fine-tunes Grounding DINO [43] with LoRA [21], efficiently modifying the attention layers while freezing the base model. Finally, the model ensemble with confidence-reweighted NMS is further adopted to boost accuracy. Code is made available4." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 57, + 284, + 295, + 411 + ], + "blocks": [ + { + "bbox": [ + 57, + 284, + 295, + 411 + ], + "lines": [ + { + "bbox": [ + 57, + 284, + 295, + 411 + ], + "spans": [ + { + "bbox": [ + 57, + 284, + 295, + 411 + ], + "type": "image", + "image_path": "85b77445909842aa7ec248e69a7ffa63394b4c2f44fa7b5de322b7355f4aed5c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 419, + 295, + 442 + ], + "lines": [ + { + "bbox": [ + 55, + 419, + 295, + 442 + ], + "spans": [ + { + "bbox": [ + 55, + 419, + 295, + 442 + ], + "type": "text", + "content": "Figure 5. Team IDCFS: overview of the proposed Pseudo-Label Driven Vision-Language Grounding for CD-FSOD." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 455, + 295, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 455, + 295, + 634 + ], + "spans": [ + { + "bbox": [ + 54, + 455, + 295, + 634 + ], + "type": "text", + "content": "Fine-tuning on GLIP. Foundation models pretrained on large-scale datasets, such as GLIP [35], have demonstrated strong performance in zero-shot and few-shot object detection tasks. 
The proposed method is based on the GLIP-L model, which has been pretrained on several datasets including FourODs, GoldG, CC3M+12M, and SBU. For downstream tasks, this team tried three ways to fine-tune GLIP: 1) Full Model Fine-Tuning: fine-tune all parameters of the GLIP-L model using a relatively small learning rate " + }, + { + "bbox": [ + 54, + 455, + 295, + 634 + ], + "type": "inline_equation", + "content": "(\\mathrm{lr} = 2\\mathrm{e} - 5)" + }, + { + "bbox": [ + 54, + 455, + 295, + 634 + ], + "type": "text", + "content": ". 2) Prompt Tuning V1: fine-tune only the parameters of the text branch. 3) Prompt Tuning V2: This mode performs traditional prompt tuning by applying a linear layer to map the extracted text features. Experiments show that Full Model Fine-Tuning generally achieves the best fine-tuning performance in most cases." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 635, + 296, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 635, + 296, + 696 + ], + "spans": [ + { + "bbox": [ + 55, + 635, + 296, + 696 + ], + "type": "text", + "content": "Iterative Training. Given the scarcity, high cost, and limited availability of annotated data in few-shot learning scenarios, this team also designed an iterative training approach to train the model, as shown in Figure 6. Specifically, the proposed method first fine-tunes the model for" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "type": "text", + "content": "a few steps using the available labeled data. Then, the fine-tuned model is used to predict the support set samples, selecting the predictions with high confidence as pseudolabels to update the label information of the support set samples. The model is then fine-tuned again. 
By iterating this process, the proposed method fully utilizes the information in the support set samples, achieving better performance while ensuring the robustness of the model, making it less susceptible to the influence of low-quality labels." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 315, + 193, + 555, + 303 + ], + "blocks": [ + { + "bbox": [ + 315, + 193, + 555, + 303 + ], + "lines": [ + { + "bbox": [ + 315, + 193, + 555, + 303 + ], + "spans": [ + { + "bbox": [ + 315, + 193, + 555, + 303 + ], + "type": "image", + "image_path": "e1bae98ebc90a5af43497e591c20abbf6d9c63d8ec25b909a6702559d0ae8005.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 309, + 553, + 322 + ], + "lines": [ + { + "bbox": [ + 313, + 309, + 553, + 322 + ], + "spans": [ + { + "bbox": [ + 313, + 309, + 553, + 322 + ], + "type": "text", + "content": "Figure 6. Team IDCFS: overview of the iterative training process." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 333, + 555, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 333, + 555, + 464 + ], + "spans": [ + { + "bbox": [ + 313, + 333, + 555, + 464 + ], + "type": "text", + "content": "Fine-tuning Grounding DINO with LoRA. The IDCFS team also uses Grounding DINO [43] as another foundation model to generate bounding boxes and classification probabilities. The LoRA [21] is used to fine-tune GroundingDINO on the few-shot training set. Specifically, this team adds bypass adapters to the linear projection layers (i.e., query, key, and value) of the attention mechanism in the visual backbone and BERT of Grounding DINO. To facilitate better adaptation to cross-domain datasets, the original model weights are frozen, and only the newly added parameters are trained." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 465, + 556, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 465, + 556, + 597 + ], + "spans": [ + { + "bbox": [ + 313, + 465, + 556, + 597 + ], + "type": "text", + "content": "Model Ensemble. To effectively combine the outputs of GLIP and Grounding DINO, a model ensemble strategy with confidence reweighting is employed. Specifically, the detection scores from each model are scaled by predefined reliability weights. The reweighted predictions are then merged and refined using Non-Maximum Suppression (NMS) [47] to eliminate redundant bounding boxes and produce the final fused results. This approach allows the more reliable model to have a greater influence on the final predictions, enhancing detection performance by leveraging the complementary strengths of both models." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 602, + 411, + 614 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 602, + 411, + 614 + ], + "spans": [ + { + "bbox": [ + 313, + 602, + 411, + 614 + ], + "type": "text", + "content": "4.3.2. Training Details" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "text", + "content": "For GLIP fine-tuning, the GLIP-L variant is used, which incorporates Swin-L [46] as the visual encoder and BERT [7] as the text encoder. The model is pre-trained on a variety of datasets, including FourODs [29-31], GoldG [27], CC3M+12M, and SBU [49]. During fine-tuning, full-model training is applied with a reduced learning rate of 2e-5, compared to the original setting of 1e-4 in GLIP. 
For Grounding DINO, the Swin-B [46] backbone is used as the vi" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 66, + 702, + 270, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 702, + 270, + 713 + ], + "spans": [ + { + "bbox": [ + 66, + 702, + 270, + 713 + ], + "type": "text", + "content": "4https://github.com/Pumpkinder/GLIP-CDFSOD" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 178 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 178 + ], + "type": "text", + "content": "sual encoder and BERT from Hugging Face [78] as the text encoder. The model is pre-trained on COCO [41], Objects365 [65], GoldG [27], Cap4M, OpenImages [31], ODinW-35 [33], and RefCOCO [27]. For the 1-shot and 5-shot settings on the CARPK dataset [20], no fine-tuning is performed. For 1-shot training on DeepFruits [60], only the backbone is fine-tuned using LoRA. In all other cases, LoRA is used to fine-tune both the backbone and the BERT text encoder." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 189, + 178, + 200 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 189, + 178, + 200 + ], + "spans": [ + { + "bbox": [ + 55, + 189, + 178, + 200 + ], + "type": "text", + "content": "4.4. 
FDUROILab_Lenovo" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 206, + 160, + 218 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 206, + 160, + 218 + ], + "spans": [ + { + "bbox": [ + 55, + 206, + 160, + 218 + ], + "type": "text", + "content": "4.4.1. Proposed Method" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 221, + 295, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 221, + 295, + 305 + ], + "spans": [ + { + "bbox": [ + 55, + 221, + 295, + 305 + ], + "type": "text", + "content": "Efficient Tuning. To enhance the model's adaptability in cross-domain few-shot detection (CDFSOD), this team proposes an efficient fine-tuning strategy. The proposed approach leverages data augmentation techniques to expand the training set and improve the model's ability to recognize objects in the target domain with proposed k-shot annotated samples." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 306, + 296, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 306, + 296, + 354 + ], + "spans": [ + { + "bbox": [ + 55, + 306, + 296, + 354 + ], + "type": "text", + "content": "Specifically, given a k-shot setting, where " + }, + { + "bbox": [ + 55, + 306, + 296, + 354 + ], + "type": "inline_equation", + "content": "\\mathbf{k}" + }, + { + "bbox": [ + 55, + 306, + 296, + 354 + ], + "type": "text", + "content": " represents the number of provided object samples, the proposed approach adopts a structured fine-tuning pipeline, which is shown in Figure 7." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 56, + 364, + 294, + 453 + ], + "blocks": [ + { + "bbox": [ + 56, + 364, + 294, + 453 + ], + "lines": [ + { + "bbox": [ + 56, + 364, + 294, + 453 + ], + "spans": [ + { + "bbox": [ + 56, + 364, + 294, + 453 + ], + "type": "image", + "image_path": "9f7c58c6153c5492de9247cf262c01c36f6cfc8b799078979c8b237f362f2ad7.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 462, + 295, + 484 + ], + "lines": [ + { + "bbox": [ + 55, + 462, + 295, + 484 + ], + "spans": [ + { + "bbox": [ + 55, + 462, + 295, + 484 + ], + "type": "text", + "content": "Figure 7. Team FDUROILab_Lenovo: overview of the efficient tuning and inference." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 498, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 498, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 498, + 296, + 713 + ], + "type": "text", + "content": "(1) Object Cropping and Augmentation. Using the provided bounding boxes of k-shot examples, the proposed method first crops the target objects from the original images. The cropped objects are then subjected to various data augmentation techniques, including flipping, rotation, grayscale conversion, and other transformations, to introduce diversity and improve generalization. (2) Object Rescaling and Random Pasting. The proposed method randomly rescales the augmented objects to different sizes and pastes these transformed objects to the original images at different locations. This step simulates new object placements and enhances the model's robustness to variations in object appearance and context. (3) Fine-Tuning with Augmented Data. The proposed method finetunes the open-vocabulary detection model with the augmented images. 
This enables the detector to better adapt to objects in the target domain, even with minimal labeled examples. Additionally, the augmented data effectively increases the number of" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 554, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 554, + 133 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 554, + 133 + ], + "type": "text", + "content": "training samples, mitigating the few-shot learning limitation and improving overall detection performance. Through this efficient fine-tuning approach, the finetuned model gains enhanced adaptability to new domains while maintaining the advantages of open-vocabulary detection." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 134, + 555, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 134, + 555, + 253 + ], + "spans": [ + { + "bbox": [ + 313, + 134, + 555, + 253 + ], + "type": "text", + "content": "Inference. Since the proposed approach is based on an open-vocabulary detection model, it requires access to the target category labels during inference, which is shown in Figure 7. To obtain these labels, this team utilizes Qwen2.5-VL [1] to generate the textual descriptions of the target categories. The retrieved target labels from Qwen2.5-VL are used as textual input to guide the detection process. Then, the open-vocabulary detection model [11] is used to identify and classify objects in the test image based on the provided text-based labels." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 316, + 264, + 555, + 377 + ], + "blocks": [ + { + "bbox": [ + 316, + 264, + 555, + 377 + ], + "lines": [ + { + "bbox": [ + 316, + 264, + 555, + 377 + ], + "spans": [ + { + "bbox": [ + 316, + 264, + 555, + 377 + ], + "type": "image", + "image_path": "c281bd39de4691807c301ba17b6d49273f8d7a7767a20c78259bbb45b9f42084.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 333, + 385, + 534, + 397 + ], + "lines": [ + { + "bbox": [ + 333, + 385, + 534, + 397 + ], + "spans": [ + { + "bbox": [ + 333, + 385, + 534, + 397 + ], + "type": "text", + "content": "Figure 8. Team FDUROILab_Lenovo: post processing." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 415, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 415, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 415, + 556, + 715 + ], + "type": "text", + "content": "Post-Process. Although existing open-vocabulary detectors possess strong open-set detection capabilities, their performance on the challenge test set remains suboptimal. Upon further analysis, this team found that while the detector can successfully identify most objects, its primary weakness lies in classification errors rather than detection failures. This indicates that the open-vocabulary detection model still struggles with accurate classification when adapting to objects in a new domain. To address this issue, the Qwen2.5-VL is introduced as an auxiliary classifier to refine the final predictions, which is shown in Figure 8. For each test image, this team prompts Qwen2.5-VL to describe the objects present in the scene and provide a list of candidate categories that are likely to appear in the image. After that, this team refines the output of the open-vocabulary detection model using one of two strategies: (1) Filtering. 
Remove objects that are classified incorrectly by the detector and are not listed by Qwen2.5-VL. (2) Reclassification: Assign all detected objects to one of the categories predicted by Qwen2.5-VL, ensuring consistency between the detected bounding boxes and the high-level scene understanding of the multimodal model. The choice between these two strategies depends on the specific test dataset. By leveraging Qwen2.5-VL as a post-processing step, this team effectively corrects classification errors and enhances the" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 297, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 297, + 97 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 297, + 97 + ], + "type": "text", + "content": "model's performance on unseen domains, leading to more accurate and reliable object detection results." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 104, + 153, + 115 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 104, + 153, + 115 + ], + "spans": [ + { + "bbox": [ + 55, + 104, + 153, + 115 + ], + "type": "text", + "content": "4.4.2. Training Details" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 119, + 296, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 119, + 296, + 251 + ], + "spans": [ + { + "bbox": [ + 55, + 119, + 296, + 251 + ], + "type": "text", + "content": "LLMDet [11] is adopted as the open-vocabulary detection model, with Swin-Large [46] serving as the visual backbone. 
The Qwen2.5-VL-72B [1] is introduced as the multimodal large language model (MLLM). Fine-tuning experiments are conducted on eight NVIDIA RTX 3090 GPUs, using a batch size of 8 and a learning rate of 1e-6. The number of training iterations varies across datasets and few-shot settings. For DeepFruits [60] and CarDD [76], the model is fine-tuned for 30, 50, and 100 batches under the 1-shot, 5-shot, and 10-shot settings. No fine-tuning is performed for CARPK [20]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 251, + 296, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 251, + 296, + 396 + ], + "spans": [ + { + "bbox": [ + 55, + 251, + 296, + 396 + ], + "type": "text", + "content": "To enhance classification accuracy, dataset-specific post-processing strategies are applied. For DeepFruits, all detected objects are reclassified into one of the categories predicted by Qwen2.5-VL. In the case of CarDD, detected objects not belonging to the predefined categories are filtered out. As CARPK contains only a single object category, no additional classification is performed. However, further filtering is applied to remove overly large bounding boxes, which are likely to be incorrect, as the objects in this dataset are generally small. In all cases, Non-Maximum Suppression (NMS) is used to eliminate redundant or overlapping predictions." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 403, + 127, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 403, + 127, + 415 + ], + "spans": [ + { + "bbox": [ + 55, + 403, + 127, + 415 + ], + "type": "text", + "content": "4.5. HUSTLab" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 422, + 160, + 434 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 422, + 160, + 434 + ], + "spans": [ + { + "bbox": [ + 55, + 422, + 160, + 434 + ], + "type": "text", + "content": "4.5.1. 
Proposed Method" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 437, + 295, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 437, + 295, + 617 + ], + "spans": [ + { + "bbox": [ + 55, + 437, + 295, + 617 + ], + "type": "text", + "content": "The HUSTLab explores the usage of Qwen2.5VL, MM-GroundingDINO, and LLMDet for the open-source CD-FSOD. The proposed method can be divided into two distinct phases: 1) Obtaining text descriptions from the training set using the Qwen2.5VL model; 2) Selecting a base model, such as Grounding DINO or LLMDet, and fine-tuning it with CopyPaste data augmentation, followed by Adversarial Weight Perturbation (AWP) training to derive the final model and obtain test results. We observe that models like Grounding DINO possess robust object detection capabilities, and fine-tuning them with few-shot data significantly enhances detection performance in specific domains. Moreover, for training sets with limited samples, utilizing text descriptions generated by large-scale vision-language models proves highly effective." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 618, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 618, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 618, + 296, + 714 + ], + "type": "text", + "content": "Text Description Generation with a Large VLM. In this phase, this team leverages Qwen2.5VL to generate detailed text descriptions for the limited samples in the training set, extracting text-modal information from the images [50]. Converting visual-modal information into text-modal information helps eliminate noise and condense semantic content. 
These detailed text descriptions are robust and will be fully utilized during the testing phase to enhance cross" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 315, + 72, + 535, + 144 + ], + "blocks": [ + { + "bbox": [ + 315, + 72, + 535, + 144 + ], + "lines": [ + { + "bbox": [ + 315, + 72, + 535, + 144 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 535, + 144 + ], + "type": "image", + "image_path": "cda92802f910d1d9c86fcb9690a9c013f2143055bf9564f0a4664b98cfef3300.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 154, + 555, + 175 + ], + "lines": [ + { + "bbox": [ + 313, + 154, + 555, + 175 + ], + "spans": [ + { + "bbox": [ + 313, + 154, + 555, + 175 + ], + "type": "text", + "content": "Figure 9. Team HUSTLab: overall framework of the proposed method." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 199, + 506, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 199, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 313, + 199, + 506, + 211 + ], + "type": "text", + "content": "domain few-shot object detection performance." + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 320, + 227, + 552, + 289 + ], + "blocks": [ + { + "bbox": [ + 320, + 227, + 552, + 289 + ], + "lines": [ + { + "bbox": [ + 320, + 227, + 552, + 289 + ], + "spans": [ + { + "bbox": [ + 320, + 227, + 552, + 289 + ], + "type": "image", + "image_path": "02bfe67dc0273f0b6bb1570a13e9e272e3fc2bb1f70285eea045d85d22cd74f4.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 297, + 544, + 308 + ], + "lines": [ + { + "bbox": [ + 321, + 297, + 544, + 308 + ], + "spans": [ + { + "bbox": [ + 321, + 297, + 544, + 308 + ], + "type": "text", + "content": "Figure 10. Team HUSTLab: text description generation [50]." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 323, + 555, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 323, + 555, + 467 + ], + "spans": [ + { + "bbox": [ + 313, + 323, + 555, + 467 + ], + "type": "text", + "content": "Training Phase. In this stage, this team first selects an appropriate base model—either Grounding DINO[43] or LLMDet—based[11] on its compatibility with the dataset. Using the zero-shot capabilities of the chosen base model, this team generates pseudo-labels, which are combined with ground-truth labels during training to regularize the model under few-shot conditions. To fine-tune the base model, this team uses CopyPaste[17] data augmentation and Adversarial Weight Perturbation (AWP) techniques[79]. This approach strengthens the model's generalization and robustness, enabling it to effectively handle cross-domain few-shot object detection tasks." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 475, + 411, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 475, + 411, + 487 + ], + "spans": [ + { + "bbox": [ + 313, + 475, + 411, + 487 + ], + "type": "text", + "content": "4.5.2. Training Details" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 491, + 555, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 491, + 555, + 645 + ], + "spans": [ + { + "bbox": [ + 313, + 491, + 555, + 645 + ], + "type": "text", + "content": "The model is fine-tuned on three datasets using the MM-GroundingDINO-Large implementation provided by MMDetection as the base object detection framework, with the aim of enhancing cross-domain detection capabilities. The performance largely depends on prompt design. Since part of the BERT-based text encoder is kept frozen during training, prompt quality plays a crucial role in boosting performance for certain object detection tasks. 
Prompts generated using Qwen2.5-VL are able to accurately describe the attribute features associated with abstract category names, thereby assisting the model in object localization and recognition. All experiments are conducted on " + }, + { + "bbox": [ + 313, + 491, + 555, + 645 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 313, + 491, + 555, + 645 + ], + "type": "text", + "content": " NVIDIA RTX 3090 GPUs." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 655, + 386, + 668 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 655, + 386, + 668 + ], + "spans": [ + { + "bbox": [ + 313, + 655, + 386, + 668 + ], + "type": "text", + "content": "4.6. TongjiLab" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 673, + 419, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 673, + 419, + 685 + ], + "spans": [ + { + "bbox": [ + 313, + 673, + 419, + 685 + ], + "type": "text", + "content": "4.6.1. 
Proposed Method" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 689, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 554, + 714 + ], + "type": "text", + "content": "The TongjiLab proposes ProtoDINO, an innovative approach for CD-FSOD under the open-set setting, building" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 323 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 323 + ], + "type": "text", + "content": "upon GroundingDINO [43] as the baseline model. To improve the target classification performance of the baseline model, the CLIP model [22, 54] is employed to extract both local and global image features from a limited set of target domain samples. These features are subsequently used to construct support sets, which serve as the foundation for building local prototype and global prototype networks, respectively. In addition, a text prototype network is developed using the CLIP model. During the target detection phase, visual features are extracted from each image query using CLIP. The L2 distances between these visual features and the local prototypes, global prototypes, and text prototypes are then computed, with these distances serving as one of the metrics for target classification. 
Furthermore, a car-damage-detection model5, implemented as a vehicle appearance damage classification model based on the Vision Transformer (ViT), is incorporated. For the final target classification, matching probabilities derived from the GroundingDINO model, the car-damage-detection model, and the prototype networks [66] are weighted and combined to produce the overall classification metric." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 324, + 296, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 324, + 296, + 361 + ], + "spans": [ + { + "bbox": [ + 55, + 324, + 296, + 361 + ], + "type": "text", + "content": "The framework of the proposed ProtoDINO is depicted in Fig. 11. Overall, ProtoDINO operates in two key stages: prototype construction and target detection." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 56, + 371, + 294, + 516 + ], + "blocks": [ + { + "bbox": [ + 56, + 371, + 294, + 516 + ], + "lines": [ + { + "bbox": [ + 56, + 371, + 294, + 516 + ], + "spans": [ + { + "bbox": [ + 56, + 371, + 294, + 516 + ], + "type": "image", + "image_path": "8e3b6e9faa6f1d53069bf24196c367f48a6404b2c370382e50a39429b69ee961.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 525, + 295, + 546 + ], + "lines": [ + { + "bbox": [ + 55, + 525, + 295, + 546 + ], + "spans": [ + { + "bbox": [ + 55, + 525, + 295, + 546 + ], + "type": "text", + "content": "Figure 11. Team TongjiLab: framework of the proposed ProtoDINO." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 562, + 296, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 562, + 296, + 693 + ], + "spans": [ + { + "bbox": [ + 55, + 562, + 296, + 693 + ], + "type": "text", + "content": "Prototype Construction. 
During the prototype construction phase, this team crops few-shot learning images based on their annotations and generates visual embeddings as local feature prototypes " + }, + { + "bbox": [ + 55, + 562, + 296, + 693 + ], + "type": "inline_equation", + "content": "c_{local}" + }, + { + "bbox": [ + 55, + 562, + 296, + 693 + ], + "type": "text", + "content": " for these local patches using the CLIP model. For 5-shot and 10-shot settings, " + }, + { + "bbox": [ + 55, + 562, + 296, + 693 + ], + "type": "inline_equation", + "content": "c_{local}" + }, + { + "bbox": [ + 55, + 562, + 296, + 693 + ], + "type": "text", + "content": " is computed as the mean of all visual embeddings within the same category. Similarly, global feature prototypes " + }, + { + "bbox": [ + 55, + 562, + 296, + 693 + ], + "type": "inline_equation", + "content": "c_{global}" + }, + { + "bbox": [ + 55, + 562, + 296, + 693 + ], + "type": "text", + "content": " are derived by encoding entire images through CLIP and applying the same averaging strategy across categories. For each category text " + }, + { + "bbox": [ + 55, + 562, + 296, + 693 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 55, + 562, + 296, + 693 + ], + "type": "text", + "content": ", this team builds the text prototype " + }, + { + "bbox": [ + 55, + 562, + 296, + 693 + ], + "type": "inline_equation", + "content": "c_{text}" + }, + { + "bbox": [ + 55, + 562, + 296, + 693 + ], + "type": "text", + "content": " using CLIP as the text encoder." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 391, + 82, + 555, + 113 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 391, + 82, + 555, + 113 + ], + "spans": [ + { + "bbox": [ + 391, + 82, + 555, + 113 + ], + "type": "interline_equation", + "content": "c _ {l o c a l} ^ {(n)} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} F _ {c r o p} ^ {(i)} \\tag {1}", + "image_path": "c814ade6f48089cca51eff010a2397c2da259ccedc9241f7eec304464c67d65b.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 391, + 118, + 555, + 149 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 391, + 118, + 555, + 149 + ], + "spans": [ + { + "bbox": [ + 391, + 118, + 555, + 149 + ], + "type": "interline_equation", + "content": "c _ {g l o b a l} ^ {(n)} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} F _ {i} ^ {(i)} \\tag {2}", + "image_path": "1ab852e9315483efc47560a3dcb6a31d41a3b83a16d826a076224a6a2523400e.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 388, + 155, + 555, + 171 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 155, + 555, + 171 + ], + "spans": [ + { + "bbox": [ + 388, + 155, + 555, + 171 + ], + "type": "interline_equation", + "content": "c _ {t e x t} ^ {(n)} = f _ {\\text {c l i p - t e x t}} \\left(t ^ {(n)}\\right) \\tag {3}", + "image_path": "763a654b6700a499b38d08923d01cb73026de94e7f0b3cbbc9baa9b28877de63.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 175, + 555, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 175, + 555, + 428 + ], + "spans": [ + { + "bbox": [ + 313, + 175, + 555, + 428 + ], + "type": "text", + "content": "Target Detection. In the target detection stage, the input image and target category texts are processed by GroundingDINO to generate bounding boxes and initial classification probabilities. 
These bounding boxes are used to crop local regions from the image, which are then encoded by CLIP to obtain their visual features " + }, + { + "bbox": [ + 313, + 175, + 555, + 428 + ], + "type": "inline_equation", + "content": "F_{crop}" + }, + { + "bbox": [ + 313, + 175, + 555, + 428 + ], + "type": "text", + "content": ". To classify these regions, this team computes the L2 distances between their representations and the precomputed prototypes as in Eq. 4. These distances are transformed into probability distributions via a softmax operation, yielding the prototype network's classification output as in Eq. 5. Simultaneously, the cropped regions are evaluated by a pre-trained car-damage-detection model (based on Vision Transformer) to generate additional classification probabilities. The final classification decision is derived by aggregating probabilities from GroundingDINO, the car-damage-detection model, and the prototype network through a weighted summation as in Eq. 6. This fusion approach effectively integrates geometric localization from GroundingDINO, cross-modal semantics from CLIP, domain-specific insights from the car-damage-detection model, and few-shot prototype matching." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 377, + 436, + 555, + 468 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 377, + 436, + 555, + 468 + ], + "spans": [ + { + "bbox": [ + 377, + 436, + 555, + 468 + ], + "type": "interline_equation", + "content": "d (u, v) = \\sqrt {\\sum_ {n} \\left(u ^ {n} - v ^ {n}\\right) ^ {2}} \\tag {4}", + "image_path": "1e01daa6fc606ca5ce479b785dcee0296e8a6449dc10479dec8c41a759e28045.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 365, + 473, + 555, + 496 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 473, + 555, + 496 + ], + "spans": [ + { + "bbox": [ + 365, + 473, + 555, + 496 + ], + "type": "interline_equation", + "content": "p r o b s _ {p r o t o} = - \\frac {1}{\\sigma} \\cdot e ^ {N o r m [ d (F, c) ]} \\tag {5}", + "image_path": "34d5c7327c4b757ab819bc31c0eaaba07ef74cf4419324e6da82d14ccdbd7d0e.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 383, + 502, + 554, + 526 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 383, + 502, + 554, + 526 + ], + "spans": [ + { + "bbox": [ + 383, + 502, + 554, + 526 + ], + "type": "interline_equation", + "content": "p r o b s = \\sum_ {i} w _ {i} \\cdot p r o b s _ {i} \\tag {6}", + "image_path": "fe1b95b5306e4a2471bb335e8e5c2e0ea900709f9c7ddd2f832aaf841afa5b00.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 531, + 411, + 543 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 531, + 411, + 543 + ], + "spans": [ + { + "bbox": [ + 314, + 531, + 411, + 543 + ], + "type": "text", + "content": "4.6.2. 
Training Details" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 545, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 545, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 545, + 555, + 715 + ], + "type": "text", + "content": "The implementation is carried out on a server running CentOS 7, equipped with a single RTX 6000 Ada GPU. For the CLIP model, the DFN5B-CLIP-ViT-H-14-378 implementation is selected due to its balance between performance and efficiency in processing visual and textual data. For the GroundingDINO model, the official implementation is used. Based on empirical observations, the threshold parameter " + }, + { + "bbox": [ + 313, + 545, + 555, + 715 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 313, + 545, + 555, + 715 + ], + "type": "text", + "content": " is set to 0.5, which provides optimal results across various scenarios. In GroundingDINO, the bounding box confidence threshold (BOX_THRESHOLD) is set to 0.3. 
For the final decision fusion, the weighting coefficients for integrating outputs from multiple modules are empirically assigned as: " + }, + { + "bbox": [ + 313, + 545, + 555, + 715 + ], + "type": "inline_equation", + "content": "w_{\\mathrm{local}} = 0.25" + }, + { + "bbox": [ + 313, + 545, + 555, + 715 + ], + "type": "text", + "content": " (local prototype network), " + }, + { + "bbox": [ + 313, + 545, + 555, + 715 + ], + "type": "inline_equation", + "content": "w_{\\mathrm{global}} = 0.15" + }, + { + "bbox": [ + 313, + 545, + 555, + 715 + ], + "type": "text", + "content": " (global prototype network), " + }, + { + "bbox": [ + 313, + 545, + 555, + 715 + ], + "type": "inline_equation", + "content": "w_{\\mathrm{text}} = 0.4" + }, + { + "bbox": [ + 313, + 545, + 555, + 715 + ], + "type": "text", + "content": " (text" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 66, + 702, + 291, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 702, + 291, + 713 + ], + "spans": [ + { + "bbox": [ + 66, + 702, + 291, + 713 + ], + "type": "text", + "content": "5 https://huggingface.co/beingamit99/car_damage_detector/tree/main" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 300, + 732, + 312, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 312, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 312, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 297, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 297, + 97 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 297, + 97 + ], + "type": "text", + "content": "prototype network), " + }, + { + "bbox": [ + 55, + 72, + 297, + 97 + ], + "type": "inline_equation", + "content": "w_{\\mathrm{dino}} = 0.1" + }, + { + "bbox": [ + 55, + 
72, + 297, + 97 + ], + "type": "text", + "content": " (GroundingDINO), and " + }, + { + "bbox": [ + 55, + 72, + 297, + 97 + ], + "type": "inline_equation", + "content": "w_{\\mathrm{car}} = 0.1" + }, + { + "bbox": [ + 55, + 72, + 297, + 97 + ], + "type": "text", + "content": " (car-damage-detection model)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 105, + 122, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 105, + 122, + 116 + ], + "spans": [ + { + "bbox": [ + 55, + 105, + 122, + 116 + ], + "type": "text", + "content": "4.7. Manifold" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 123, + 160, + 135 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 123, + 160, + 135 + ], + "spans": [ + { + "bbox": [ + 55, + 123, + 160, + 135 + ], + "type": "text", + "content": "4.7.1. Proposed Method" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 138, + 296, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 138, + 296, + 293 + ], + "spans": [ + { + "bbox": [ + 54, + 138, + 296, + 293 + ], + "type": "text", + "content": "To address the challenge of few-shot object detection in cross-domain scenarios, the Manifold team proposes a novel approach based on the detection pipeline of a two-stage object detection algorithm. As illustrated in the Figure. 12, the proposed method first employs an open set object detection network, which is trained on public datasets, to detect objects in the query image. However, due to the domain gap between the pretraining datasets and the query datasets, the detection results cannot be directly trusted. Therefore, this team treats these results as region proposals that may contain objects of interest. Subsequently, this team combines the instance features from the support set for classification to obtain the final detection results." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 58, + 305, + 296, + 412 + ], + "blocks": [ + { + "bbox": [ + 58, + 305, + 296, + 412 + ], + "lines": [ + { + "bbox": [ + 58, + 305, + 296, + 412 + ], + "spans": [ + { + "bbox": [ + 58, + 305, + 296, + 412 + ], + "type": "image", + "image_path": "e2f28966d29b1d9309d4b9cb28111ee3b3de5a35e827252d153c528d74e5800c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 422, + 281, + 434 + ], + "lines": [ + { + "bbox": [ + 67, + 422, + 281, + 434 + ], + "spans": [ + { + "bbox": [ + 67, + 422, + 281, + 434 + ], + "type": "text", + "content": "Figure 12. Team Manifold: overall framework of GDPRE." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 449, + 295, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 449, + 295, + 593 + ], + "spans": [ + { + "bbox": [ + 55, + 449, + 295, + 593 + ], + "type": "text", + "content": "GroundingDINO-based Region Proposals. The GroundingDINO is selected as the pre-trained open-set object detector. It can detect objects of interest in images using input text, and it was pre-trained on seven datasets: COCO, O365, GoldG, Cap4M, OpenImage, ODinW-35, and RefCOCO. This pre-training gives it good detection capabilities for most real-world objects. However, in cross-domain few-shot scenarios, its detection effectiveness is suboptimal. For example, avocados may be misclassified as oranges because of the higher frequency of oranges in the pre-training data. Despite this, GroundingDINO can still provide region proposals for potential objects of interest in query images." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 594, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 295, + 714 + ], + "type": "text", + "content": "ResNet-based Feature Classification. After obtaining region proposals, this team classifies the objects within them using support set images. Given the limited samples and significant intra-class variations in image space, directly matching support instances with query candidates in this space yields poor results. ResNet pre-trained on ImageNet is used to extract image features, mapping instances to a more robust feature space. To address scale differences, this team resize instances in both support and region proposals images to " + }, + { + "bbox": [ + 55, + 594, + 295, + 714 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 594, + 295, + 714 + ], + "type": "text", + "content": " for feature extraction. Considering" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "type": "text", + "content": "some classes have large intra-class and small inter-class differences, this team treats each instance's feature vector in multi-shot settings as a separate support vector rather than averaging them by class. This team calculates the cosine similarity between candidate region instances and support set instance feature vectors, assigning the region proposal instance to the class of the most similar support instance. This yields the final detection results, and the cosine similarity serves as the prediction confidence." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 189, + 443, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 189, + 443, + 201 + ], + "spans": [ + { + "bbox": [ + 313, + 189, + 443, + 201 + ], + "type": "text", + "content": "4.7.2. Implementation Details" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 204, + 555, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 204, + 555, + 336 + ], + "spans": [ + { + "bbox": [ + 313, + 204, + 555, + 336 + ], + "type": "text", + "content": "Given that both GroundingDINO and ResNet are pretrained on large-scale datasets, fine-tuning them under few-shot constraints—where the training classes do not overlap with the test classes—can be challenging. As a result, the pre-trained model weights are kept frozen. This approach requires minimal computational resources and can be executed on a laptop equipped with an RTX 4060 GPU. During inference, the category names from the test dataset are used as prompt inputs for GroundingDINO, and the BOX_THRESHOLD is set to 0.1 to obtain the final detection results." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 346, + 362, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 346, + 362, + 357 + ], + "spans": [ + { + "bbox": [ + 313, + 346, + 362, + 357 + ], + "type": "text", + "content": "4.8.MXT" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 365, + 418, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 365, + 418, + 376 + ], + "spans": [ + { + "bbox": [ + 313, + 365, + 418, + 376 + ], + "type": "text", + "content": "4.8.1. 
Proposed Method" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 380, + 555, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 380, + 555, + 501 + ], + "spans": [ + { + "bbox": [ + 313, + 380, + 555, + 501 + ], + "type": "text", + "content": "This team proposes a Domain Adaptation Enhancement Module (DAEM) for Cross-Domain Few-Shot Object Detection (CD-FSOD), built as an extension to the CD-ViTO framework. While CD-ViTO provides a strong foundation for open-set cross-domain detection with DinoV2 ViT-L backbone, it still faces challenges with significant domain shifts. As illustrated in Fig 13, the DAEM integrates seamlessly with the DinoV2 ViT-L backbone and enhances domain adaptation through two complementary mechanisms: batch enhancement and feature alignment." + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 317, + 512, + 555, + 643 + ], + "blocks": [ + { + "bbox": [ + 317, + 512, + 555, + 643 + ], + "lines": [ + { + "bbox": [ + 317, + 512, + 555, + 643 + ], + "spans": [ + { + "bbox": [ + 317, + 512, + 555, + 643 + ], + "type": "image", + "image_path": "01810eb83ce91b149e958f580b0174e2285a8dc0d030ed2e1c8e83e917133bfe.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 651, + 537, + 662 + ], + "lines": [ + { + "bbox": [ + 331, + 651, + 537, + 662 + ], + "spans": [ + { + "bbox": [ + 331, + 651, + 537, + 662 + ], + "type": "text", + "content": "Figure 13. Team DAEM: overall of the proposed model." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "text", + "content": "Batch Enhancement Module. 
The batch enhancement module increases training diversity through controlled style transfer between domains. For both source and target do" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 295, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 295, + 97 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 295, + 97 + ], + "type": "text", + "content": "main images, this team introduces cross-domain characteristics while preserving semantic content:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 108, + 110, + 295, + 135 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 110, + 295, + 135 + ], + "spans": [ + { + "bbox": [ + 108, + 110, + 295, + 135 + ], + "type": "interline_equation", + "content": "\\operatorname {i m g} _ {\\text {s t y l e d}} = \\sigma_ {t} \\cdot \\frac {\\operatorname {i m g} - \\mu_ {s}}{\\sigma_ {s}} + \\mu_ {t} \\tag {7}", + "image_path": "542ce157e969fcbf8441ad06bf12f80ab75fa1dfc883ef3975a5aa3501914467.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 143, + 296, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 143, + 296, + 179 + ], + "spans": [ + { + "bbox": [ + 55, + 143, + 296, + 179 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 143, + 296, + 179 + ], + "type": "inline_equation", + "content": "\\mu_s, \\sigma_s" + }, + { + "bbox": [ + 55, + 143, + 296, + 179 + ], + "type": "text", + "content": " are source image statistics and " + }, + { + "bbox": [ + 55, + 143, + 296, + 179 + ], + "type": 
"inline_equation", + "content": "\\mu_t, \\sigma_t" + }, + { + "bbox": [ + 55, + 143, + 296, + 179 + ], + "type": "text", + "content": " are target domain statistics. The enhancement strength " + }, + { + "bbox": [ + 55, + 143, + 296, + 179 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 55, + 143, + 296, + 179 + ], + "type": "text", + "content": " gradually increases during training as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 123, + 191, + 295, + 217 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 191, + 295, + 217 + ], + "spans": [ + { + "bbox": [ + 123, + 191, + 295, + 217 + ], + "type": "interline_equation", + "content": "\\alpha = \\min (1. 0, \\frac {t}{T _ {\\text {w a r m u p}}}) \\tag {8}", + "image_path": "0693ff91746e274d37bc3a660190108db992ba7f396a2a55c15d92d173af7306.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 225, + 296, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 225, + 296, + 261 + ], + "spans": [ + { + "bbox": [ + 55, + 225, + 296, + 261 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 225, + 296, + 261 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 55, + 225, + 296, + 261 + ], + "type": "text", + "content": " is the current iteration and " + }, + { + "bbox": [ + 55, + 225, + 296, + 261 + ], + "type": "inline_equation", + "content": "T_{warmup}" + }, + { + "bbox": [ + 55, + 225, + 296, + 261 + ], + "type": "text", + "content": " is set to 500. This gradual adaptation prevents disrupting the pre-trained DinoV2 ViT-L features early in training." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 262, + 296, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 262, + 296, + 310 + ], + "spans": [ + { + "bbox": [ + 55, + 262, + 296, + 310 + ], + "type": "text", + "content": "Feature Alignment Module. 
The feature alignment module employs two complementary strategies to reduce domain gaps: Maximum Mean Discrepancy (MMD) and style-based adaptation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 311, + 296, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 311, + 296, + 371 + ], + "spans": [ + { + "bbox": [ + 55, + 311, + 296, + 371 + ], + "type": "text", + "content": "MMD Loss: The Maximum Mean Discrepancy is applied to reduce distribution differences between features from the source and target domains. MMD measures the distance between feature distributions in a reproducing kernel Hilbert space:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 64, + 393, + 295, + 443 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 393, + 295, + 443 + ], + "spans": [ + { + "bbox": [ + 64, + 393, + 295, + 443 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {M M D} \\left(\\mathbf {X} _ {s}, \\mathbf {X} _ {t}\\right) = \\left\\| \\frac {1}{n _ {s}} \\sum_ {i = 1} ^ {n _ {s}} \\phi \\left(\\mathbf {x} _ {s} ^ {i}\\right) - \\frac {1}{n _ {t}} \\sum_ {j = 1} ^ {n _ {t}} \\phi \\left(\\mathbf {x} _ {t} ^ {j}\\right) \\right\\| _ {\\mathcal {H}} ^ {2} \\tag {9}", + "image_path": "2b50d3312342b5cd025a42c775638c146fa67b6a97da6349fafce67842eb6f89.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 445, + 296, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 445, + 296, + 505 + ], + "spans": [ + { + "bbox": [ + 55, + 445, + 296, + 505 + ], + "type": "text", + "content": "This is implemented with multiple Gaussian kernels with bandwidths " + }, + { + "bbox": [ + 55, + 445, + 296, + 505 + ], + "type": "inline_equation", + "content": "\\sigma \\in \\{0.5, 1.0, 2.0, 5.0\\}" + }, + { + "bbox": [ + 55, + 445, + 296, + 505 + ], + "type": "text", + "content": " to capture similarities at different feature scales. 
This approach guides DinoV2 ViT-L to preserve its powerful representation abilities while adapting to target domains with minimal samples." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 506, + 296, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 506, + 296, + 552 + ], + "spans": [ + { + "bbox": [ + 55, + 506, + 296, + 552 + ], + "type": "text", + "content": "Style Loss: Style-based adaptation addresses visual variations between domains that are unrelated to object semantics. For feature maps " + }, + { + "bbox": [ + 55, + 506, + 296, + 552 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 55, + 506, + 296, + 552 + ], + "type": "text", + "content": ", the channel-wise statistics is transformed as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 127, + 566, + 295, + 592 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 566, + 295, + 592 + ], + "spans": [ + { + "bbox": [ + 127, + 566, + 295, + 592 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {F}} = \\sigma_ {t} \\cdot \\frac {\\mathbf {F} - \\mu_ {s}}{\\sigma_ {s}} + \\mu_ {t} \\tag {10}", + "image_path": "faf9e372c6d33c67159205a2853d4200013b769eca66abbbf066658facb6487a.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 600, + 296, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 600, + 296, + 647 + ], + "spans": [ + { + "bbox": [ + 55, + 600, + 296, + 647 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 600, + 296, + 647 + ], + "type": "inline_equation", + "content": "\\mu_s, \\sigma_s" + }, + { + "bbox": [ + 55, + 600, + 296, + 647 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 600, + 296, + 647 + ], + "type": "inline_equation", + "content": "\\mu_t, \\sigma_t" + }, + { + "bbox": [ + 55, + 600, + 296, + 647 + ], + "type": "text", + "content": " are the channel statistics of 
source and target features. This approach helps Di-noV2 ViT-L focus on domain-invariant object characteristics rather than domain-specific visual styles." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 649, + 296, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 649, + 296, + 685 + ], + "spans": [ + { + "bbox": [ + 55, + 649, + 296, + 685 + ], + "type": "text", + "content": "The overall training objective combines the original CDViTO detection loss with the proposed domain adaptation components:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 90, + 701, + 295, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 701, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 90, + 701, + 295, + 715 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {\\text {d e t}} + \\lambda_ {m m d} \\mathcal {L} _ {M M D} + \\lambda_ {\\text {s t y l e}} \\mathcal {L} _ {\\text {s t y l e}} \\tag {11}", + "image_path": "13a996835fc99a32166076ccff99c3a26e87309aafa00d6ad1749b0033fd6aab.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 72, + 411, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 411, + 84 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 411, + 84 + ], + "type": "text", + "content": "4.8.2. Training Details" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 87, + 555, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 87, + 555, + 194 + ], + "spans": [ + { + "bbox": [ + 313, + 87, + 555, + 194 + ], + "type": "text", + "content": "Following the pretrain–finetune–test pipeline established in the CD-FSOD benchmark, the pretrained DinoV2 ViT-L backbone from CD-ViTO is utilized. During fine-tuning, the backbone and Region Proposal Network (RPN) are selectively frozen, while the Domain-Adaptive Enhancement Modules (DAEM) and ROI Heads are optimized. 
This strategy preserves the general representational power of DinoV2 ViT-L while allowing domain-specific components to adapt effectively." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 196, + 555, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 196, + 555, + 279 + ], + "spans": [ + { + "bbox": [ + 313, + 196, + 555, + 279 + ], + "type": "text", + "content": "Training is conducted on NVIDIA A800 GPUs, with hyperparameters determined through extensive experimentation: the MMD loss weight is set to " + }, + { + "bbox": [ + 313, + 196, + 555, + 279 + ], + "type": "inline_equation", + "content": "\\lambda_{mmd} = 0.16" + }, + { + "bbox": [ + 313, + 196, + 555, + 279 + ], + "type": "text", + "content": ", the style loss weight to " + }, + { + "bbox": [ + 313, + 196, + 555, + 279 + ], + "type": "inline_equation", + "content": "\\lambda_{style} = 0.12" + }, + { + "bbox": [ + 313, + 196, + 555, + 279 + ], + "type": "text", + "content": ", and the batch enhancement strength to " + }, + { + "bbox": [ + 313, + 196, + 555, + 279 + ], + "type": "inline_equation", + "content": "\\alpha_{max} = 0.8" + }, + { + "bbox": [ + 313, + 196, + 555, + 279 + ], + "type": "text", + "content": ". Differential learning rates are applied, using a multiplier of 2.0 for the DAEM modules and bias terms, with a base learning rate of " + }, + { + "bbox": [ + 313, + 196, + 555, + 279 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 313, + 196, + 555, + 279 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 281, + 556, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 281, + 556, + 399 + ], + "spans": [ + { + "bbox": [ + 313, + 281, + 556, + 399 + ], + "type": "text", + "content": "A warm-up phase of 500 iterations is introduced to gradually increase adaptation strength. 
This helps stabilize early-stage training and prevents disruption of the pretrained DinoV2 ViT-L features. Optimization is performed using stochastic gradient descent (SGD) with a momentum of 0.9 and a weight decay of " + }, + { + "bbox": [ + 313, + 281, + 556, + 399 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 313, + 281, + 556, + 399 + ], + "type": "text", + "content": ". The model reaches optimal cross-domain performance after approximately 50 epochs. The proposed approach maintains the efficiency of CD-ViTO while delivering substantial improvements in challenging cross-domain few-shot detection scenarios." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 411, + 524, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 411, + 524, + 426 + ], + "spans": [ + { + "bbox": [ + 313, + 411, + 524, + 426 + ], + "type": "text", + "content": "5. Special Closed-Source Track Methods" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 432, + 368, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 432, + 368, + 443 + ], + "spans": [ + { + "bbox": [ + 314, + 432, + 368, + 443 + ], + "type": "text", + "content": "5.1. X-Few" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 449, + 419, + 461 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 449, + 419, + 461 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 419, + 461 + ], + "type": "text", + "content": "5.1.1. 
Proposed Method" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 465, + 554, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 465, + 554, + 548 + ], + "spans": [ + { + "bbox": [ + 313, + 465, + 554, + 548 + ], + "type": "text", + "content": "To address the challenges of domain shift and category confusion arising from limited annotated data in CD-FSOD, the X-Few team proposes a novel domain adaptation strategy based on the Instance Feature Caching (IFC) mechanism. The framework of the proposed method is shown in Fig. 14, which is mainly built upon the CD-ViTO baseline. Code is made available " + }, + { + "bbox": [ + 313, + 465, + 554, + 548 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 313, + 465, + 554, + 548 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 549, + 555, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 549, + 555, + 668 + ], + "spans": [ + { + "bbox": [ + 313, + 549, + 555, + 668 + ], + "type": "text", + "content": "Intuitively, the IFC module is proposed to construct a cache model that could store and dynamically retrieve discriminative instance-level features from the target domain, alleviating model degradation caused by cross-domain distribution discrepancy in the few-shot supervision situation. Specifically, the IFC mechanism facilitates knowledge transfer through prototype-based feature alignment and an attention-guided memory update strategy, enhancing the model's generalization capability in the data-scarce cross-domain scenario." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 670, + 554, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 670, + 554, + 694 + ], + "spans": [ + { + "bbox": [ + 313, + 670, + 554, + 694 + ], + "type": "text", + "content": "Instance Feature Caching Construction. 
Given a support set " + }, + { + "bbox": [ + 313, + 670, + 554, + 694 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 313, + 670, + 554, + 694 + ], + "type": "text", + "content": " comprising " + }, + { + "bbox": [ + 313, + 670, + 554, + 694 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 670, + 554, + 694 + ], + "type": "text", + "content": " target categories, each consisting" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 324, + 702, + 544, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 702, + 544, + 713 + ], + "spans": [ + { + "bbox": [ + 324, + 702, + 544, + 713 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 324, + 702, + 544, + 713 + ], + "type": "text", + "content": "https://github.com/johnmaijer/X-Few-_CD-FSOD" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 70, + 295, + 171 + ], + "blocks": [ + { + "bbox": [ + 57, + 70, + 295, + 171 + ], + "lines": [ + { + "bbox": [ + 57, + 70, + 295, + 171 + ], + "spans": [ + { + "bbox": [ + 57, + 70, + 295, + 171 + ], + "type": "image", + "image_path": "3a8a5a344c8d7a48b7252170d1c6170ca1b9ad7c65a3ecb9a38a78a25b908a37.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 178, + 295, + 201 + ], + "lines": [ + { + "bbox": [ + 55, + 178, + 295, + 201 + ], + "spans": [ + { + "bbox": [ + 55, + 178, + 295, + 201 + ], + "type": "text", + "content": "Figure 14. 
Team X-Few: illustration of the proposed Instance Feature Caching (IFC)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "spans": [ + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "text", + "content": "of " + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "text", + "content": " annotated instances, denoted as " + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "inline_equation", + "content": "I_{K}" + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "text", + "content": " with their associating labels " + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "inline_equation", + "content": "L_{N}" + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "text", + "content": ". For all " + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "inline_equation", + "content": "N \\times K" + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "text", + "content": " support samples, the proposed method leverages a pre-trained DINoV2 ViT " + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "inline_equation", + "content": "f_{CM}" + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "text", + "content": " to obtain the instance-level features " + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "inline_equation", + "content": "F_{train} \\in \\mathbf{R}^{NK \\times C}" + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "text", + "content": ". 
Similarly, the ground-truth labels are also encoded into " + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "text", + "content": "-dimensional one-hot vectors " + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "inline_equation", + "content": "L_{train} \\in \\mathbf{R}^{NK \\times N}" + }, + { + "bbox": [ + 55, + 218, + 296, + 291 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 135, + 297, + 294, + 309 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 297, + 294, + 309 + ], + "spans": [ + { + "bbox": [ + 135, + 297, + 294, + 309 + ], + "type": "interline_equation", + "content": "F _ {t r a i n} = \\mathbf {f} _ {C M} \\left(I _ {K}\\right) \\tag {12}", + "image_path": "a52655dc0e72448db1a5c90b37fef79880351951b1754df79cc9ae69a8a2f908.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 127, + 315, + 294, + 327 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 315, + 294, + 327 + ], + "spans": [ + { + "bbox": [ + 127, + 315, + 294, + 327 + ], + "type": "interline_equation", + "content": "L _ {\\text {t r a i n}} = \\mathbf {O n e H o t} \\left(I _ {N}\\right) \\tag {13}", + "image_path": "2e776d348f36269cd21f07a116e3c3ab05ba8b3b4376ab45fc04cc9c75b69a62.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 330, + 296, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 330, + 296, + 437 + ], + "spans": [ + { + "bbox": [ + 55, + 330, + 296, + 437 + ], + "type": "text", + "content": "The feature extraction step is performed in an offline fashion to ensure persistent storage of high-quality feature representations for support set instances, thereby preserving discriminative semantic characteristics and spatial-aware contextual patterns in a memory-efficient manner. 
Then, these features and their corresponding label encodings are systematically cached to establish a comprehensive knowledge base that facilitates adaptive domain-aware detection while mitigating catastrophic forgetting." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "spans": [ + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "type": "text", + "content": "Instance Search. After constructing the instance feature caching, given a query image " + }, + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "type": "text", + "content": ", the proposed method first feeds " + }, + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "type": "text", + "content": " into both the Region Proposal Network and the Vision Transformer encoder to generate candidate regions and extract their deep features, respectively. These region proposals are then combined with the corresponding instance-level features in " + }, + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "type": "text", + "content": " to derive a query vector " + }, + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "type": "inline_equation", + "content": "f_{test}" + }, + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "type": "text", + "content": " for each candidate bounding box. 
Then, the proposed method achieves the most relevant instance feature lookup and finally calculate the adaptation representation " + }, + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "type": "inline_equation", + "content": "A \\times L_{train}" + }, + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "type": "text", + "content": " for the target domain, where " + }, + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "type": "inline_equation", + "content": "\\mathbf{A} \\in \\mathbf{R}^{NK}" + }, + { + "bbox": [ + 55, + 437, + 296, + 593 + ], + "type": "text", + "content": " is the affinity matrix between query vector and instance feature caching, being defined as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 110, + 598, + 294, + 613 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 598, + 294, + 613 + ], + "spans": [ + { + "bbox": [ + 110, + 598, + 294, + 613 + ], + "type": "interline_equation", + "content": "\\mathbf {A} = \\exp (- \\beta (1 - f _ {\\text {t e s t}} F _ {\\text {t r a i n}} ^ {T})) \\tag {14}", + "image_path": "153d0b654b7da804fdc9de38e58dac2c9c03e4be0664d437046d606714320426.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 617, + 295, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 295, + 665 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 295, + 665 + ], + "type": "text", + "content": "Ultimately, the domain adaptation representation is fed into the classification and regression branches of the original detection framework to calibrate prediction results from the open-set detector:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 665, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 665, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 56, + 665, + 296, + 714 + ], + "type": "text", + "content": "1. 
Classification Enhancement: The similarity distribution between query features and cached features is leveraged to refine confidence estimates for the target domain categories through contrastive alignment." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 72, + 553, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 120 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 120 + ], + "type": "text", + "content": "2. Localization Refinement: Retrieved instance localization priors are incorporated to constrain bounding box regression, thereby mitigating cross-domain localization biases caused by domain shifts." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 121, + 553, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 121, + 553, + 168 + ], + "spans": [ + { + "bbox": [ + 313, + 121, + 553, + 168 + ], + "type": "text", + "content": "The above two strategies ensure that the detector adaptively aligns domain-invariant semantic representations while suppressing spurious correlations introduced by cross-domain discrepancies." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 175, + 411, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 175, + 411, + 186 + ], + "spans": [ + { + "bbox": [ + 313, + 175, + 411, + 186 + ], + "type": "text", + "content": "5.1.2. Training Details" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 190, + 555, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 190, + 555, + 299 + ], + "spans": [ + { + "bbox": [ + 313, + 190, + 555, + 299 + ], + "type": "text", + "content": "A single RTX A800 GPU is used for the experiments. The model is pre-trained on COCO and fine-tuned on novel support images. For the DeepFruit[60], Carpk[20], and CarDD[76], the specific hyper-parameters settings are shown in the Tab. 2. 
The tailored combination of learning rates and epoch schedules reflects a fine-grained tuning strategy to address domain heterogeneity across datasets, ensuring optimal trade-offs between generalization and task-specific optimization." + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 317, + 330, + 564, + 381 + ], + "blocks": [ + { + "bbox": [ + 336, + 309, + 531, + 321 + ], + "lines": [ + { + "bbox": [ + 336, + 309, + 531, + 321 + ], + "spans": [ + { + "bbox": [ + 336, + 309, + 531, + 321 + ], + "type": "text", + "content": "Table 2. Team X-Few: the hyper-parameters settings." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 330, + 564, + 381 + ], + "lines": [ + { + "bbox": [ + 317, + 330, + 564, + 381 + ], + "spans": [ + { + "bbox": [ + 317, + 330, + 564, + 381 + ], + "type": "table", + "html": "
hyperparameter/shotDeepFruit [60]Carpk [20]CarDD [76]
151015101510
Batch size161616161616161616
Initial lr1e-31e-31e-31e-41e-41e-41e-31e-31e-3
Epoch40100200408010040100200
", + "image_path": "6fc03298da45892b97c900a30b15ac1c7bd6dd9d8d85b21db84865a5f8f20679.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 402, + 358, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 402, + 358, + 414 + ], + "spans": [ + { + "bbox": [ + 313, + 402, + 358, + 414 + ], + "type": "text", + "content": "5.2. MM" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 420, + 418, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 420, + 418, + 432 + ], + "spans": [ + { + "bbox": [ + 313, + 420, + 418, + 432 + ], + "type": "text", + "content": "5.2.1. Proposed Method" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 435, + 555, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 435, + 555, + 530 + ], + "spans": [ + { + "bbox": [ + 313, + 435, + 555, + 530 + ], + "type": "text", + "content": "The MM team proposes a novel DFE-ViT method for CD-FSOD, in the closed set setting, which only takes COCO as the source data and transfers the model to a novel target. As in Fig. 15, the proposed DFE-ViT method is built upon one open-set detector (DE-ViT) and finetuned using a few labeled instances from the target domain. New improvements include Instance Feature Enhancement, ROI Feature Enhancement." 
+ } + ] + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 317, + 543, + 552, + 640 + ], + "blocks": [ + { + "bbox": [ + 317, + 543, + 552, + 640 + ], + "lines": [ + { + "bbox": [ + 317, + 543, + 552, + 640 + ], + "spans": [ + { + "bbox": [ + 317, + 543, + 552, + 640 + ], + "type": "image", + "image_path": "0ccac6ed1ed6b29bebe32679af7dcabc465e116a0d16d870635d362ad7bd1b03.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 328, + 651, + 539, + 662 + ], + "lines": [ + { + "bbox": [ + 328, + 651, + 539, + 662 + ], + "spans": [ + { + "bbox": [ + 328, + 651, + 539, + 662 + ], + "type": "text", + "content": "Figure 15. Team MM: overall framework of the DFE-ViT." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 677, + 554, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 554, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 554, + 715 + ], + "type": "text", + "content": "Specifically, given " + }, + { + "bbox": [ + 313, + 677, + 554, + 715 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 313, + 677, + 554, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 677, + 554, + 715 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 313, + 677, + 554, + 715 + ], + "type": "text", + "content": " as input, DFE-ViT follows a similar pipeline as DE-ViT to obtain instance features " + }, + { + "bbox": [ + 313, + 677, + 554, + 715 + ], + "type": "inline_equation", + "content": "F_{ins}" + }, + { + "bbox": [ + 313, + 677, + 554, + 715 + ], + "type": "text", + "content": ", region proposals " + }, + { + "bbox": [ + 313, + 677, + 554, + 715 + ], + "type": "inline_equation", + "content": "R_{q}" + }, + { + "bbox": [ + 313, + 677, + 554, + 715 + ], + "type": "text", + "content": ", visual features " + }, + { + "bbox": [ + 313, + 677, + 
554, + 715 + ], + "type": "inline_equation", + "content": "F_{q}" + }, + { + "bbox": [ + 313, + 677, + 554, + 715 + ], + "type": "text", + "content": ", and ROI features" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 132 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 132 + ], + "type": "inline_equation", + "content": "F_{q_{roi}}" + }, + { + "bbox": [ + 55, + 72, + 294, + 132 + ], + "type": "text", + "content": ". However, different from directly using " + }, + { + "bbox": [ + 55, + 72, + 294, + 132 + ], + "type": "inline_equation", + "content": "F_{ins}" + }, + { + "bbox": [ + 55, + 72, + 294, + 132 + ], + "type": "text", + "content": " to derive the class prototypes, an Instance Feature Enhancement module (IFE) and an ROI Feature Enhancement module (RFE) are proposed to enhance feature representation from both instance-level and ROI-level perspectives." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 133, + 295, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 133, + 295, + 300 + ], + "spans": [ + { + "bbox": [ + 54, + 133, + 295, + 300 + ], + "type": "text", + "content": "The IFE module adopts a residual CBAM structure to refine " + }, + { + "bbox": [ + 54, + 133, + 295, + 300 + ], + "type": "inline_equation", + "content": "F_{ins}^{ob}" + }, + { + "bbox": [ + 54, + 133, + 295, + 300 + ], + "type": "text", + "content": ", enabling the network to adaptively emphasize informative channels and spatial regions. 
To guide this attention process more explicitly, a dedicated CBAM loss " + }, + { + "bbox": [ + 54, + 133, + 295, + 300 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{cbam}" + }, + { + "bbox": [ + 54, + 133, + 295, + 300 + ], + "type": "text", + "content": " is designed, which encourages the enhanced instance features to align with salient regions in both spatial and channel dimensions. Furthermore, to enhance semantic alignment, a class prototype enhancement mechanism is further incorporated where each object instance interacts with its corresponding class prototype via cross-attention, ensuring more discriminative and category-aware features. The output of IFE is optimized jointly with the standard detection losses, including the localization loss " + }, + { + "bbox": [ + 54, + 133, + 295, + 300 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{loc}" + }, + { + "bbox": [ + 54, + 133, + 295, + 300 + ], + "type": "text", + "content": ", classification loss " + }, + { + "bbox": [ + 54, + 133, + 295, + 300 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{cls}" + }, + { + "bbox": [ + 54, + 133, + 295, + 300 + ], + "type": "text", + "content": ", and the attention-guided loss " + }, + { + "bbox": [ + 54, + 133, + 295, + 300 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{cbam}" + }, + { + "bbox": [ + 54, + 133, + 295, + 300 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 300, + 295, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 300, + 295, + 407 + ], + "spans": [ + { + "bbox": [ + 55, + 300, + 295, + 407 + ], + "type": "text", + "content": "For ROI features, this team introduces RFE based on a Variational Autoencoder (VAE). 
Each ROI feature " + }, + { + "bbox": [ + 55, + 300, + 295, + 407 + ], + "type": "inline_equation", + "content": "F_{q_{roi}}" + }, + { + "bbox": [ + 55, + 300, + 295, + 407 + ], + "type": "text", + "content": " is encoded into a latent distribution and then reconstructed, which enables learning a more robust and expressive representation. A reconstruction loss " + }, + { + "bbox": [ + 55, + 300, + 295, + 407 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{vae}" + }, + { + "bbox": [ + 55, + 300, + 295, + 407 + ], + "type": "text", + "content": " is employed to ensure fidelity and consistency in the learned latent space. This ROI-level enhancement complements the instance-level refinement, offering a more diversified and generalized feature representation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 407, + 295, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 407, + 295, + 443 + ], + "spans": [ + { + "bbox": [ + 55, + 407, + 295, + 443 + ], + "type": "text", + "content": "The top modules including the detection head " + }, + { + "bbox": [ + 55, + 407, + 295, + 443 + ], + "type": "inline_equation", + "content": "M_{DET}" + }, + { + "bbox": [ + 55, + 407, + 295, + 443 + ], + "type": "text", + "content": " and the classification head " + }, + { + "bbox": [ + 55, + 407, + 295, + 443 + ], + "type": "inline_equation", + "content": "M_{CLS}" + }, + { + "bbox": [ + 55, + 407, + 295, + 443 + ], + "type": "text", + "content": " are fine-tuned using the combined objective:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 80, + 452, + 294, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 452, + 294, + 464 + ], + "spans": [ + { + "bbox": [ + 80, + 452, + 294, + 464 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {l o c} + \\mathcal {L} _ {c l s} + \\alpha * \\mathcal {L} _ {c b a m} + \\beta * \\mathcal {L} _ {v a e}. 
\\tag {15}", + "image_path": "34b272fd1df84c6f17f1492d20fb8dd6350fd3e78c40deb4876d1c5137bf0fdc.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "spans": [ + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "text", + "content": "Instance Feature Enhancement. The IFE module aims to refine instance features by integrating spatial/channel attention and semantic guidance. Given input instance features " + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "inline_equation", + "content": "F_{ins} \\in \\mathbb{R}^{B \\times C \\times H \\times W}" + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "text", + "content": ", it first applies a residual CBAM to obtain spatially and channel-refined features " + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "inline_equation", + "content": "F_{cbam}" + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "text", + "content": ". Then, class prototypes " + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "inline_equation", + "content": "P \\in \\mathbb{R}^{N \\times C}" + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "text", + "content": " are used to semantically enhance the instance features via a cross-attention mechanism. 
Specifically, query and key projections are computed as " + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "inline_equation", + "content": "Q = W_qF_{ins}" + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "inline_equation", + "content": "K = W_kP" + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "text", + "content": ", followed by attention: " + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "inline_equation", + "content": "A = \\text{softmax}(QK^\\top / \\sqrt{d})" + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "text", + "content": ". The attended prototype features are added with a learnable weight " + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "text", + "content": ", yielding " + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "inline_equation", + "content": "F_{proto}" + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "text", + "content": ". The final enhanced features are computed as " + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "inline_equation", + "content": "F_{enh} = F_{cbam} + F_{proto}" + }, + { + "bbox": [ + 55, + 473, + 295, + 640 + ], + "type": "text", + "content": ", which are more discriminative for downstream detection." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 642, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 295, + 714 + ], + "type": "text", + "content": "ROI Feature Enhancement. The RFE module is based on a Variational Autoencoder and class prototype computation. As shown in Fig. 
15, the orange modules represent the newly proposed contributions: using VAE to model ROI features and enriching them with class prototypes. Given input ROI features " + }, + { + "bbox": [ + 55, + 642, + 295, + 714 + ], + "type": "inline_equation", + "content": "x \\in \\mathbb{R}^{N \\times C \\times k \\times k}" + }, + { + "bbox": [ + 55, + 642, + 295, + 714 + ], + "type": "text", + "content": ", VAE" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "text", + "content": "encodes " + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "text", + "content": " into latent mean " + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "inline_equation", + "content": "\\mu \\in \\mathbb{R}^{N \\times d}" + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "text", + "content": " and log-variance " + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "inline_equation", + "content": "\\log \\sigma^2 \\in \\mathbb{R}^{N \\times d}" + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "text", + "content": " through linear layers. Latent variables are sampled as " + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "inline_equation", + "content": "z = \\mu + \\sigma \\odot \\epsilon" + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "text", + "content": " using the reparameterization trick. 
Then, " + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "text", + "content": " is decoded to reconstruct the ROI features " + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "inline_equation", + "content": "\\hat{x} = \\mathrm{Decoder}(z)" + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "text", + "content": ". The reconstruction loss is computed as " + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "inline_equation", + "content": "L_{\\text{recon}} = \\frac{1}{N} \\sum_{i=1}^{N} \\| \\hat{x}_i - x_i \\|^2" + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "text", + "content": ", and the KL divergence loss regularizes the latent distribution: " + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "inline_equation", + "content": "L_{KL} = -\\frac{1}{2} \\sum_{i=1}^{N} (1 + \\log \\sigma_i^2 - \\mu_i^2 - \\sigma_i^2)" + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "text", + "content": ". The total VAE loss is " + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "inline_equation", + "content": "L_{vae} = L_{\\text{recon}} + L_{KL}" + }, + { + "bbox": [ + 313, + 72, + 553, + 205 + ], + "type": "text", + "content": ". Finally, class prototypes are computed to further enhance feature representation across categories." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 213, + 411, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 213, + 411, + 224 + ], + "spans": [ + { + "bbox": [ + 313, + 213, + 411, + 224 + ], + "type": "text", + "content": "5.2.2. 
Training Details" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "spans": [ + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": "The model is trained in the \"pretrain, finetune, and test\" pipeline. Specifically, the base DE-ViT model pretrained on COCO is taken, then the " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "M_{DET}" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "M_{CLS}" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "IFE" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "RFE" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": " are tuned on novel support images " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": " using the loss as in Eq. 15. 
The hyperparameter " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": " temperature for " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{cbam}" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": " temperature for " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{vae}" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": " are set as 0.3, 0.4 for all the target datasets. While the value " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "N_{dom}" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": " means the number of virtual domains depending on the number of target classes " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": ", specifically, " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "N_{dom} = 2 * N" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": ". The hyperparameter Top-K (" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": ") in DE-ViT is set to 5. 
For datasets with the number of classes " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": " less than 5, " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": " is set to " + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": ". The trainable parameters are finetuned on 1-shot around 80 epochs, and on 5/10-shot around 50 epochs. The SGD with a learning rate of 0.002 is used as the optimizer. Experiments are performed on four A6000 GPUs." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 416, + 358, + 427 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 416, + 358, + 427 + ], + "spans": [ + { + "bbox": [ + 313, + 416, + 358, + 427 + ], + "type": "text", + "content": "5.3. FSV" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 434, + 418, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 434, + 418, + 445 + ], + "spans": [ + { + "bbox": [ + 313, + 434, + 418, + 445 + ], + "type": "text", + "content": "5.3.1. Proposed Method" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 449, + 553, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 449, + 553, + 653 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 553, + 653 + ], + "type": "text", + "content": "The FSV team proposes an enhancement to the prototype-based detection for the cross-domain few-shot object detection (CD-FSOD) challenge under the closed-source setting, based on the CD-ViTO baseline model, as shown in Figure 16. 
Based on observations of the existing approach, this team found that CD-FSOD faces three key challenges. First, few-shot learning inherently suffers from limited example diversity. Second, conventional binary masking treats all spatial locations within an object region equally, which fails to prioritize more discriminative central regions over potentially noisy boundary areas. Third, standard cosine similarity calculations between query features and prototypes lack proper calibration, resulting in suboptimal separability across domain shifts. To solve these three challenges, this team explores three techniques: (1) Support Set Data Augmentation, (2) Soft Mask-Based Prototype Aggregation, and (3) Temperature-Scaled Similarity Calibration." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 654, + 553, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 553, + 714 + ], + "type": "text", + "content": "Support Set Data Augmentation. For the support images, the proposed approach constructs a stochastic augmentation function to increase the diversity of the samples. 
DINOv2 [48] is used as the feature extraction backbone for the augmented data, for its robust self-supervised learning capa" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 66, + 73, + 289, + 171 + ], + "blocks": [ + { + "bbox": [ + 66, + 73, + 289, + 171 + ], + "lines": [ + { + "bbox": [ + 66, + 73, + 289, + 171 + ], + "spans": [ + { + "bbox": [ + 66, + 73, + 289, + 171 + ], + "type": "image", + "image_path": "8e0a5ce6cbe5f1bc98968e9128663375e2bf4d8e17311e0087fd301456ee1c0b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 70, + 182, + 280, + 194 + ], + "lines": [ + { + "bbox": [ + 70, + 182, + 280, + 194 + ], + "spans": [ + { + "bbox": [ + 70, + 182, + 280, + 194 + ], + "type": "text", + "content": "Figure 16. Team FSV: overview of the proposed method." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 203, + 295, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 203, + 295, + 262 + ], + "spans": [ + { + "bbox": [ + 55, + 203, + 295, + 262 + ], + "type": "text", + "content": "bilities and effective cross-domain transfer. The augmentation pipeline consists of a composition of transformations including Random Saturation, Random Contrast, Random Brightness, Random Flip, Random Rotation, Random Crop, Random Erasing, and Resize Shortest Edge." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "spans": [ + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "text", + "content": "Soft Mask-Based Prototype Aggregation. To prioritize more discriminative central regions over potentially noisy boundary areas, the conventional binary masks are replaced by Gaussian soft masks to create soft spatial attention. Let " + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "inline_equation", + "content": "F_{ins} = \\{F_{ins}^{ob}, F_{ins}^{bg}\\}" + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "text", + "content": " denote the extracted instance features and " + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "text", + "content": " denote the binary mask of an instance. The soft mask could be defined " + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "inline_equation", + "content": "\\tilde{M}" + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "text", + "content": " as: " + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "inline_equation", + "content": "\\tilde{M} = \\frac{G_{\\sigma}(M)}{\\max G_{\\sigma}(M)}" + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "inline_equation", + "content": "G_{\\sigma}" + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "text", + "content": " is the Gaussian filter with standard deviation parameter " + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "text", + "content": ". 
The extracted instance features for foreground objects " + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "inline_equation", + "content": "F_{ins}^{ob}" + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "text", + "content": " are then weighted by the soft mask " + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "inline_equation", + "content": "\\tilde{M}" + }, + { + "bbox": [ + 55, + 263, + 296, + 396 + ], + "type": "text", + "content": ", used as the initialization for learnable instance features." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 397, + 296, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 397, + 296, + 506 + ], + "spans": [ + { + "bbox": [ + 55, + 397, + 296, + 506 + ], + "type": "text", + "content": "Temperature-Scaled Similarity Calibration. Finally, to calibrate image features to other domains, the proposed approach takes temperature scaling to make the final prototypes better match those in the new domain, which is a simple yet effective strategy to improve the discriminability of similarity scores. Let " + }, + { + "bbox": [ + 55, + 397, + 296, + 506 + ], + "type": "inline_equation", + "content": "F_{q_{roi}}" + }, + { + "bbox": [ + 55, + 397, + 296, + 506 + ], + "type": "text", + "content": " denote the ROI features extracted from a query image using DINOv2. " + }, + { + "bbox": [ + 55, + 397, + 296, + 506 + ], + "type": "inline_equation", + "content": "F_{pro}" + }, + { + "bbox": [ + 55, + 397, + 296, + 506 + ], + "type": "text", + "content": " denotes the prototype vector. 
The temperature scaling is applied during the cosine similarity computation as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 119, + 513, + 295, + 543 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 513, + 295, + 543 + ], + "spans": [ + { + "bbox": [ + 119, + 513, + 295, + 543 + ], + "type": "interline_equation", + "content": "s _ {\\tau} = \\frac {F _ {q _ {r o i}} ^ {\\top} F _ {p r o}}{\\tau \\cdot \\| F _ {q _ {r o i}} \\| \\cdot \\| F _ {p r o} \\|}, \\tag {16}", + "image_path": "8256797b5f0a9495c1811d89895022caafb9339d5fda93b87d1650cdffd08b1d.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 549, + 296, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 549, + 296, + 597 + ], + "spans": [ + { + "bbox": [ + 55, + 549, + 296, + 597 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 549, + 296, + 597 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 55, + 549, + 296, + 597 + ], + "type": "text", + "content": " is a temperature parameter that controls the sharpness of the similarity distribution. By tuning the temperature parameter, the entropy of the output distribution can be better modulated." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 602, + 185, + 615 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 602, + 185, + 615 + ], + "spans": [ + { + "bbox": [ + 55, + 602, + 185, + 615 + ], + "type": "text", + "content": "5.3.2. Implementation Details" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 617, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 296, + 713 + ], + "type": "text", + "content": "The training procedure utilizes only the provided few-shot datasets (1-shot, 5-shot, and 10-shot variants), without incorporating additional external data. 
The trainable parameters are finetuned for each testing dataset around 100 epochs. The training batch size is 16, with a base learning rate of 0.002. The parameter " + }, + { + "bbox": [ + 55, + 617, + 296, + 713 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 55, + 617, + 296, + 713 + ], + "type": "text", + "content": " in Soft Mask-Based Prototype Aggregation is set to 2.0. The parameter " + }, + { + "bbox": [ + 55, + 617, + 296, + 713 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 55, + 617, + 296, + 713 + ], + "type": "text", + "content": " in Temperature-Scaled Similarity Calibration is set to 0.07." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 72, + 550, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 550, + 84 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 550, + 84 + ], + "type": "text", + "content": "Experiments are performed on four NVIDIA A100 GPUs." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 91, + 356, + 103 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 91, + 356, + 103 + ], + "spans": [ + { + "bbox": [ + 314, + 91, + 356, + 103 + ], + "type": "text", + "content": "5.4. IPC" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 110, + 419, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 110, + 419, + 121 + ], + "spans": [ + { + "bbox": [ + 314, + 110, + 419, + 121 + ], + "type": "text", + "content": "5.4.1. Proposed Method" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 124, + 554, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 124, + 554, + 209 + ], + "spans": [ + { + "bbox": [ + 313, + 124, + 554, + 209 + ], + "type": "text", + "content": "The IPC team utilizes CD-ViTO as the baseline, which is an improved version of the DE-ViT method, designed to enhance the cross-domain detection capability. 
To further mitigate performance degradation caused by cross-domain discrepancies and a very small number of test domain reference examples, this team was inspired by [59] to introduce a test-time adaptation algorithm during the inference phase." + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 320, + 224, + 550, + 380 + ], + "blocks": [ + { + "bbox": [ + 320, + 224, + 550, + 380 + ], + "lines": [ + { + "bbox": [ + 320, + 224, + 550, + 380 + ], + "spans": [ + { + "bbox": [ + 320, + 224, + 550, + 380 + ], + "type": "image", + "image_path": "3fc263e3ffbb96207506aa0a9167656cd36623edb2810e5480bf08198e1c4a2a.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 392, + 555, + 480 + ], + "lines": [ + { + "bbox": [ + 313, + 392, + 555, + 480 + ], + "spans": [ + { + "bbox": [ + 313, + 392, + 555, + 480 + ], + "type": "text", + "content": "Figure 17. Team IPC: overview of the proposed approach. The upper section represents the baseline CD-ViTO fine-tuning phase; the lower section represents the test-time adaptation (TTA) process. The TTA procedure operates without access to the original training data, updating the fine-tuned detector on a single testing image before making a prediction. Crucially, only the mask prediction module in CD-ViTO undergoes gradient updates during TTA iterations." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 317, + 495, + 553, + 550 + ], + "blocks": [ + { + "bbox": [ + 317, + 495, + 553, + 550 + ], + "lines": [ + { + "bbox": [ + 317, + 495, + 553, + 550 + ], + "spans": [ + { + "bbox": [ + 317, + 495, + 553, + 550 + ], + "type": "image", + "image_path": "8c8332ca03b01b9b660825506ac66cda3019c014dd3bbd75b026f0da59b8569c.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 560, + 554, + 604 + ], + "lines": [ + { + "bbox": [ + 313, + 560, + 554, + 604 + ], + "spans": [ + { + "bbox": [ + 313, + 560, + 554, + 604 + ], + "type": "text", + "content": "Figure 18. Team IPC: by iteratively retaining proposals (yellow boxes " + }, + { + "bbox": [ + 313, + 560, + 554, + 604 + ], + "type": "inline_equation", + "content": "\\square" + }, + { + "bbox": [ + 313, + 560, + 554, + 604 + ], + "type": "text", + "content": ") with high confidence scores as pseudo labels (red boxes " + }, + { + "bbox": [ + 313, + 560, + 554, + 604 + ], + "type": "inline_equation", + "content": "\\square" + }, + { + "bbox": [ + 313, + 560, + 554, + 604 + ], + "type": "text", + "content": "), the model can effectively filter out most invalid detection boxes." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": "To be specific, the proposed approach employs an iterative process as shown in Fig 17. 
During each iteration " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": " (where " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "t \\in \\{1, \\dots, T\\}" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": "), the existing detector " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "\\theta_{t-1}" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": " generates predictions " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "D_t = \\{(b_{t,i}, p_{t,i}) : \\forall i\\}" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": " for image " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "b_{t,i}" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": " representing the " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "i^{th}" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": " object's bounding box and " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "p_{t,i} \\in [0,1]^K" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": " denoting the class probability distribution across " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": " categories. 
The detection confidence " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "c_{t,i} \\in [0,1]" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": " is determined by the highest probability in " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "p_{t,i}" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": ", while the" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 156 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 156 + ], + "type": "text", + "content": "corresponding class index gives the predicted object category " + }, + { + "bbox": [ + 55, + 72, + 294, + 156 + ], + "type": "inline_equation", + "content": "y_{t,i} \\in \\{1, \\dots, K\\}" + }, + { + "bbox": [ + 55, + 72, + 294, + 156 + ], + "type": "text", + "content": ". Confident detections are then selected as pseudo-labels as illustrated in Fig 18: " + }, + { + "bbox": [ + 55, + 72, + 294, + 156 + ], + "type": "inline_equation", + "content": "P_t = \\{(b_{t,i}, y_{t,i}) : c_{t,i} > \\lambda_{conf}\\}" + }, + { + "bbox": [ + 55, + 72, + 294, + 156 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 55, + 72, + 294, + 156 + ], + "type": "inline_equation", + "content": "\\lambda_{conf}" + }, + { + "bbox": [ + 55, + 72, + 294, + 156 + ], + "type": "text", + "content": " serving as the confidence cutoff. 
The detector is subsequently refined through gradient descent on these pseudo-labels, yielding an improved model " + }, + { + "bbox": [ + 55, + 72, + 294, + 156 + ], + "type": "inline_equation", + "content": "\\theta_t" + }, + { + "bbox": [ + 55, + 72, + 294, + 156 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "spans": [ + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "type": "text", + "content": "For the initial iteration " + }, + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "type": "inline_equation", + "content": "(t = 1)" + }, + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "type": "text", + "content": ", the detector " + }, + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "type": "inline_equation", + "content": "\\theta_{t - 1}" + }, + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "type": "text", + "content": " is initialized as " + }, + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "type": "inline_equation", + "content": "\\theta_0" + }, + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "type": "text", + "content": ", which was pre-trained on source domain data. Upon completion of the final iteration " + }, + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "type": "inline_equation", + "content": "(t = T)" + }, + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "type": "text", + "content": ", the optimized model " + }, + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "type": "inline_equation", + "content": "\\theta_T" + }, + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "type": "text", + "content": " produces the final predictions for " + }, + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 55, + 156, + 294, + 252 + ], + "type": "text", + "content": ". 
Notably, this self-training paradigm maintains the original network architecture and operates without requiring access to source data or any other pretrained foundation models during adaptation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 259, + 153, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 259, + 153, + 270 + ], + "spans": [ + { + "bbox": [ + 55, + 259, + 153, + 270 + ], + "type": "text", + "content": "5.4.2. Training Details" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 274, + 295, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 274, + 295, + 382 + ], + "spans": [ + { + "bbox": [ + 55, + 274, + 295, + 382 + ], + "type": "text", + "content": "A single NVIDIA A6000 GPU is used for all experiments. The proposed method extends the CD-ViTO baseline through a test-time adaptation pipeline, initialized with k-shot instance fine-tuning on novel support datasets. During inference, the proposed method processes each test image using momentum SGD (" + }, + { + "bbox": [ + 55, + 274, + 295, + 382 + ], + "type": "inline_equation", + "content": "\\beta = 0.9" + }, + { + "bbox": [ + 55, + 274, + 295, + 382 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 274, + 295, + 382 + ], + "type": "inline_equation", + "content": "\\alpha = 0.001" + }, + { + "bbox": [ + 55, + 274, + 295, + 382 + ], + "type": "text", + "content": ") to exclusively update the mask prediction module through 5 iterations. For all experimental datasets, the cut-off confidence threshold " + }, + { + "bbox": [ + 55, + 274, + 295, + 382 + ], + "type": "inline_equation", + "content": "\\lambda_{conf}" + }, + { + "bbox": [ + 55, + 274, + 295, + 382 + ], + "type": "text", + "content": " is empirically set to 0.6." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 390, + 99, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 390, + 99, + 402 + ], + "spans": [ + { + "bbox": [ + 55, + 390, + 99, + 402 + ], + "type": "text", + "content": "5.5.LJY" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 407, + 160, + 420 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 407, + 160, + 420 + ], + "spans": [ + { + "bbox": [ + 55, + 407, + 160, + 420 + ], + "type": "text", + "content": "5.5.1. Proposed Method" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 423, + 295, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 423, + 295, + 518 + ], + "spans": [ + { + "bbox": [ + 55, + 423, + 295, + 518 + ], + "type": "text", + "content": "As shown in Fig. 19, the LJY team proposes similarity calibrated prototype refinement network, which utilizes query-aware guidelines to generate prototypes. The network contains a pretrained DINOv2 ViT, a region proposal network, an ROI align module, a detection head, and a one-vs-rest classification head. During the finetuning stage, the parameters of DINOv2 ViT are frozen. Only the parameters of the detection head and the classification head are finetuned." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 59, + 528, + 294, + 662 + ], + "blocks": [ + { + "bbox": [ + 59, + 528, + 294, + 662 + ], + "lines": [ + { + "bbox": [ + 59, + 528, + 294, + 662 + ], + "spans": [ + { + "bbox": [ + 59, + 528, + 294, + 662 + ], + "type": "image", + "image_path": "a08ef0f9dc4809732e94f6a54a4e9fb6edf8cb2fe27dba8bc5508c95462c3ad5.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 82, + 670, + 268, + 681 + ], + "lines": [ + { + "bbox": [ + 82, + 670, + 268, + 681 + ], + "spans": [ + { + "bbox": [ + 82, + 670, + 268, + 681 + ], + "type": "text", + "content": "Figure 19. 
Team LJY: overall framework of SCPR." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "content": "Given a query image " + }, + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\pmb{q} \\in \\mathbb{R}^{H \\times W \\times C}" + }, + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "content": " and a set of support images " + }, + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "inline_equation", + "content": "H, W" + }, + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "content": " stand for the num-" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": "ber of height, width and channels, respectively, the DINOv2 ViT backbone is used for obtaining query patches " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\pmb{F}_{q} \\in \\mathbb{R}^{d}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": " and support patches " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\pmb{F}_{s}" + }, + { + "bbox": [ + 313, + 72, + 
555, + 517 + ], + "type": "text", + "content": ". Then, two linear layers are applied to project the query patches " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\pmb{F}_{q}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\pmb{Q}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\pmb{K}_{1}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": " and project the support patches " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\pmb{F}_{s}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\pmb{K}_{2}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": ". The query patches " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\pmb{F}_{q}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": " and the support patches " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\pmb{F}_{s}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": " are then concatenated to obtain " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\pmb{F}_{cat} = \\text{Concat}(\\pmb{F}_{q}, \\pmb{F}_{s})" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": ". 
The concatenated patches " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\pmb{F}_{cat}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": " are projected to obtain " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\pmb{V}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": ". To align the query patches and the support patches, the proposed method conducts scaled dot product on query patches " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\pmb{F}_{q}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": " and itself to obtain self attention score " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "A_{self} = \\frac{\\pmb{Q}\\pmb{K}_{1}^{\\top}}{\\sqrt{d}}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": ". Meanwhile, cross-attention score is computed using cosine similarity to ensure scale invariance " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "A_{cross} = \\frac{\\pmb{Q}\\pmb{K}_{2}^{\\top}}{\\|\\pmb{Q}\\|_{2}\\|\\pmb{K}_{2}\\|_{2} + \\epsilon}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": " is a small constant to avoid division by zero. 
The combined attention score is obtained by concatenating both and then be normalized by the softmax operation " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "A = \\text{Softmax}(\\text{Concat}(\\pmb{A}_{self}, \\pmb{A}_{cross}))" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": ". The refined query representation is obtained by applying attention weights to the value matrix " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{F}}_{q} = \\pmb{F}_{q} + \\pmb{A}\\pmb{V}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": ". With the aligned query patches, the proposed method then generates prototypes with query-perceptual information. To further calibrate support features, their cosine similarity with the refined query is computed: " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "Sim = \\text{Softmax}\\left(\\frac{\\pmb{F}_{s}\\pmb{F}_{q}^{\\top}}{\\|\\pmb{F}_{s}\\|_{2}\\|\\pmb{F}_{q}\\|_{2} + \\epsilon}\\right)" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": ". This similarity is used to re-weight the support representations: " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{F}}_{s} = \\pmb{F}_{s} + Sim*\\hat{\\pmb{F}}_{q}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": ". A learnable weighting function is applied via a sigmoid transformation: " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "W = Sigmoid(FC(\\hat{\\pmb{F}}_{s}))" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": ". 
Ensuring adaptive feature scaling: " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{F}}_{s} = W\\cdot \\hat{\\pmb{F}}_{s}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": ". The updated support features are then averaged across the K-shot dimension to derive refined prototypes: " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "P = \\frac{1}{K}\\sum_{i=1}^{K}\\hat{\\pmb{F}}_{s}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": ". Finally, the query-aware prototype refinement is performed using a weighted combination of the refined prototypes and the original prototypes: " + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{P}} = \\alpha\\cdot\\pmb{P} + (1-\\alpha)\\cdot\\frac{1}{K}\\sum_{i=1}^{K}\\pmb{F}_{s}" + }, + { + "bbox": [ + 313, + 72, + 555, + 517 + ], + "type": "text", + "content": ". This final prototype representation retains both source-domain knowledge and query-specific adaptability, effectively enhancing cross-domain few-shot detection performance." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 522, + 411, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 522, + 411, + 534 + ], + "spans": [ + { + "bbox": [ + 313, + 522, + 411, + 534 + ], + "type": "text", + "content": "5.5.2. Training Details" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 537, + 554, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 537, + 554, + 597 + ], + "spans": [ + { + "bbox": [ + 313, + 537, + 554, + 597 + ], + "type": "text", + "content": "The proposed modules are fine-tuned on novel support images, with the base DE-ViT pretrained on COCO taken as initialization. The SGD with a learning rate of 0.002 is used as the optimizer. 
All experiments are conducted on two RTX3090 GPUs. The mAPs for 1/5/10 shots are reported." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 608, + 411, + 621 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 608, + 411, + 621 + ], + "spans": [ + { + "bbox": [ + 313, + 608, + 411, + 621 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 627, + 554, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 627, + 554, + 712 + ], + "spans": [ + { + "bbox": [ + 313, + 627, + 554, + 712 + ], + "type": "text", + "content": "INSAIT, Sofia University \"St. Kliment Ohridski\". Partially funded by the Ministry of Education and Science of Bulgaria's support for INSAIT as part of the Bulgarian National Roadmap for Research Infrastructure. This work was partially supported by the Humboldt Foundation. We thank the NTIRE 2025 sponsors: ByteDance, Meituan, Kuaishou, and University of Wurzburg (Computer Vision Lab)." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 186, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 186, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 186, + 83 + ], + "type": "text", + "content": "A. 
Teams and affiliations" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 92, + 144, + 104 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 92, + 144, + 104 + ], + "spans": [ + { + "bbox": [ + 56, + 92, + 144, + 104 + ], + "type": "text", + "content": "NTIRE 2025 team" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 111, + 295, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 111, + 295, + 136 + ], + "spans": [ + { + "bbox": [ + 55, + 111, + 295, + 136 + ], + "type": "text", + "content": "Title: NTIRE 2025 Challenge on Cross-Domain Few-Shot Object Detection: Methods and Results." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 136, + 100, + 145 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 136, + 100, + 145 + ], + "spans": [ + { + "bbox": [ + 56, + 136, + 100, + 145 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 57, + 148, + 195, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 148, + 195, + 159 + ], + "spans": [ + { + "bbox": [ + 57, + 148, + 195, + 159 + ], + "type": "text", + "content": "Yuqian Fu1 (yuqian.fu@insait.ai)," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 57, + 160, + 225, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 160, + 225, + 171 + ], + "spans": [ + { + "bbox": [ + 57, + 160, + 225, + 171 + ], + "type": "text", + "content": "Xingyu Qiu² (xyqiu24@m.fudan.edu.cn)," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 57, + 171, + 179, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 171, + 179, + 182 + ], + "spans": [ + { + "bbox": [ + 57, + 171, + 179, + 182 + ], + "type": "text", + "content": "Bin Ren" + }, + { + "bbox": [ + 57, + 171, + 179, + 182 + ], + "type": "inline_equation", + "content": "^{3,4}" + }, + { + "bbox": [ + 57, + 171, + 179, + 182 + ], + "type": "text", + 
"content": " (bin.ren@unitn.it)," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 57, + 183, + 216, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 183, + 216, + 195 + ], + "spans": [ + { + "bbox": [ + 57, + 183, + 216, + 195 + ], + "type": "text", + "content": "Yanwei " + }, + { + "bbox": [ + 57, + 183, + 216, + 195 + ], + "type": "inline_equation", + "content": "\\mathrm{Fu}^2" + }, + { + "bbox": [ + 57, + 183, + 216, + 195 + ], + "type": "text", + "content": " (yanweifu@fudan.edu.cn)," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 57, + 196, + 258, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 196, + 258, + 207 + ], + "spans": [ + { + "bbox": [ + 57, + 196, + 258, + 207 + ], + "type": "text", + "content": "Radu Timofte" + }, + { + "bbox": [ + 57, + 196, + 258, + 207 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 57, + 196, + 258, + 207 + ], + "type": "text", + "content": " (radu.timofte@uni-wuerzburg.de)," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 57, + 208, + 203, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 208, + 203, + 219 + ], + "spans": [ + { + "bbox": [ + 57, + 208, + 203, + 219 + ], + "type": "text", + "content": "Nicu Sebe4 (niculae.sebe@unitn.it)," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 220, + 245, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 220, + 245, + 231 + ], + "spans": [ + { + "bbox": [ + 57, + 220, + 245, + 231 + ], + "type": "text", + "content": "Ming-Hsuan Yang" + }, + { + "bbox": [ + 57, + 220, + 245, + 231 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 57, + 220, + 245, + 231 + ], + "type": "text", + "content": " (mhyang@ucmerced.edu)," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 232, + 215, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 232, + 215, + 243 + 
], + "spans": [ + { + "bbox": [ + 57, + 232, + 215, + 243 + ], + "type": "text", + "content": "Luc Van Gool1 (luc.vangool@insait.ai)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 243, + 107, + 255 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 243, + 107, + 255 + ], + "spans": [ + { + "bbox": [ + 56, + 243, + 107, + 255 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 255, + 293, + 326 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 57, + 255, + 293, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 255, + 293, + 266 + ], + "spans": [ + { + "bbox": [ + 57, + 255, + 293, + 266 + ], + "type": "text", + "content": "1 INSAIT, Sofia University St. Kliment Ohridski, Bulgaria" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 57, + 267, + 162, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 267, + 162, + 278 + ], + "spans": [ + { + "bbox": [ + 57, + 267, + 162, + 278 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 57, + 267, + 162, + 278 + ], + "type": "text", + "content": " Fudan University, China" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 57, + 279, + 160, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 279, + 160, + 290 + ], + "spans": [ + { + "bbox": [ + 57, + 279, + 160, + 290 + ], + "type": "text", + "content": "3 University of Pisa, Italy" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 57, + 291, + 170, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 291, + 170, + 303 + ], + "spans": [ + { + "bbox": [ + 57, + 291, + 170, + 303 + ], + "type": "text", + "content": "4 University of Trento, Italy" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 57, + 303, + 293, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 303, + 
293, + 315 + ], + "spans": [ + { + "bbox": [ + 57, + 303, + 293, + 315 + ], + "type": "text", + "content": "5 Computer Vision Lab, University of Würzburg, Germany" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 57, + 315, + 262, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 315, + 262, + 326 + ], + "spans": [ + { + "bbox": [ + 57, + 315, + 262, + 326 + ], + "type": "text", + "content": "6 University of California at Merced, United States" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 56, + 351, + 106, + 362 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 351, + 106, + 362 + ], + "spans": [ + { + "bbox": [ + 56, + 351, + 106, + 362 + ], + "type": "text", + "content": "MoveFree" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 55, + 370, + 295, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 370, + 295, + 395 + ], + "spans": [ + { + "bbox": [ + 55, + 370, + 295, + 395 + ], + "type": "text", + "content": "Title: Marrying MoE-powered Grounding DINO with Self-training for Cross-domain Few-shot Object Detection" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 56, + 396, + 100, + 405 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 396, + 100, + 405 + ], + "spans": [ + { + "bbox": [ + 56, + 396, + 100, + 405 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 57, + 407, + 233, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 407, + 233, + 418 + ], + "spans": [ + { + "bbox": [ + 57, + 407, + 233, + 418 + ], + "type": "text", + "content": "Kaijin Zhang" + }, + { + "bbox": [ + 57, + 407, + 233, + 418 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 57, + 407, + 233, + 418 + ], + "type": "text", + "content": " (zhang.kaijin1@zte.com.cn)," + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 57, + 
419, + 250, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 419, + 250, + 430 + ], + "spans": [ + { + "bbox": [ + 57, + 419, + 250, + 430 + ], + "type": "text", + "content": "Qingpeng Nong1 (nong.qingpeng@zte.com.cn)," + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 57, + 431, + 250, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 431, + 250, + 442 + ], + "spans": [ + { + "bbox": [ + 57, + 431, + 250, + 442 + ], + "type": "text", + "content": "Xiugang Dong" + }, + { + "bbox": [ + 57, + 431, + 250, + 442 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 57, + 431, + 250, + 442 + ], + "type": "text", + "content": " (dong.xiugang20@zte.com.cn)," + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 57, + 443, + 205, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 443, + 205, + 454 + ], + "spans": [ + { + "bbox": [ + 57, + 443, + 205, + 454 + ], + "type": "text", + "content": "Hong Gao" + }, + { + "bbox": [ + 57, + 443, + 205, + 454 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 57, + 443, + 205, + 454 + ], + "type": "text", + "content": " (gao.hong@zte.com.cn)," + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 57, + 455, + 263, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 455, + 263, + 467 + ], + "spans": [ + { + "bbox": [ + 57, + 455, + 263, + 467 + ], + "type": "text", + "content": "Xiangsheng Zhou1 (zhou.xiangsheng@zte.com.cn)" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 56, + 467, + 107, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 467, + 107, + 478 + ], + "spans": [ + { + "bbox": [ + 56, + 467, + 107, + 478 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 57, + 479, + 182, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 479, + 182, + 
489 + ], + "spans": [ + { + "bbox": [ + 57, + 479, + 182, + 489 + ], + "type": "text", + "content": "1 Central R & D Institute, ZTE" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 56, + 514, + 123, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 514, + 123, + 526 + ], + "spans": [ + { + "bbox": [ + 56, + 514, + 123, + 526 + ], + "type": "text", + "content": "AI4EarthLab" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 55, + 534, + 295, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 534, + 295, + 569 + ], + "spans": [ + { + "bbox": [ + 55, + 534, + 295, + 569 + ], + "type": "text", + "content": "Title: Enhance Then Search: An Augmentation-Search Strategy with Foundation Models for Cross-Domain Few-Shot Object Detection" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 56, + 571, + 99, + 581 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 571, + 99, + 581 + ], + "spans": [ + { + "bbox": [ + 56, + 571, + 99, + 581 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 57, + 582, + 257, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 582, + 257, + 594 + ], + "spans": [ + { + "bbox": [ + 57, + 582, + 257, + 594 + ], + "type": "text", + "content": "Jiancheng Pan1 (jiancheng.pan_plus@gmail.com)," + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 57, + 594, + 248, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 594, + 248, + 605 + ], + "spans": [ + { + "bbox": [ + 57, + 594, + 248, + 605 + ], + "type": "text", + "content": "Yanxing Liu" + }, + { + "bbox": [ + 57, + 594, + 248, + 605 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 57, + 594, + 248, + 605 + ], + "type": "text", + "content": " (liuyanxing21@mails.ucas.ac.cn)," + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 57, + 606, + 192, + 617 + ], + "type": "text", 
+ "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 606, + 192, + 617 + ], + "spans": [ + { + "bbox": [ + 57, + 606, + 192, + 617 + ], + "type": "text", + "content": "Xiao He" + }, + { + "bbox": [ + 57, + 606, + 192, + 617 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 57, + 606, + 192, + 617 + ], + "type": "text", + "content": " (xiaohewhu@163.com)," + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 57, + 618, + 244, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 618, + 244, + 630 + ], + "spans": [ + { + "bbox": [ + 57, + 618, + 244, + 630 + ], + "type": "text", + "content": "Jiahao Li1 (lijiahao23@mails.tsinghua.edu.cn)," + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 57, + 631, + 229, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 631, + 229, + 641 + ], + "spans": [ + { + "bbox": [ + 57, + 631, + 229, + 641 + ], + "type": "text", + "content": "Yuze Sun" + }, + { + "bbox": [ + 57, + 631, + 229, + 641 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 57, + 631, + 229, + 641 + ], + "type": "text", + "content": " (syz23@mails.tsinghua.edu.cn)," + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 57, + 642, + 230, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 642, + 230, + 654 + ], + "spans": [ + { + "bbox": [ + 57, + 642, + 230, + 654 + ], + "type": "text", + "content": "Xiaomeng Huang" + }, + { + "bbox": [ + 57, + 642, + 230, + 654 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 57, + 642, + 230, + 654 + ], + "type": "text", + "content": " (hxm@tsinghua.edu.cn)" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 56, + 654, + 107, + 665 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 654, + 107, + 665 + ], + "spans": [ + { + "bbox": [ + 56, + 654, + 107, + 665 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 39 
+ }, + { + "bbox": [ + 57, + 666, + 241, + 701 + ], + "type": "list", + "angle": 0, + "index": 43, + "blocks": [ + { + "bbox": [ + 57, + 666, + 146, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 666, + 146, + 677 + ], + "spans": [ + { + "bbox": [ + 57, + 666, + 146, + 677 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 57, + 666, + 146, + 677 + ], + "type": "text", + "content": " Tsinghua University" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 57, + 678, + 241, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 678, + 241, + 689 + ], + "spans": [ + { + "bbox": [ + 57, + 678, + 241, + 689 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 57, + 678, + 241, + 689 + ], + "type": "text", + "content": " University of Chinese Academy of Sciences" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 57, + 689, + 138, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 689, + 138, + 701 + ], + "spans": [ + { + "bbox": [ + 57, + 689, + 138, + 701 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 57, + 689, + 138, + 701 + ], + "type": "text", + "content": " Wuhan University" + } + ] + } + ], + "index": 42 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 72, + 351, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 72, + 351, + 83 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 351, + 83 + ], + "type": "text", + "content": "IDCFS" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 314, + 89, + 553, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 89, + 553, + 113 + ], + "spans": [ + { + "bbox": [ + 314, + 89, + 553, + 113 + ], + "type": "text", + "content": "Title: Pseudo-Label Driven Vision-Language Grounding for Cross-Domain Few-Shot Object Detection" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 315, + 114, + 
358, + 124 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 114, + 358, + 124 + ], + "spans": [ + { + "bbox": [ + 315, + 114, + 358, + 124 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 315, + 125, + 498, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 125, + 498, + 137 + ], + "spans": [ + { + "bbox": [ + 315, + 125, + 498, + 137 + ], + "type": "text", + "content": "Zhenyu Zhang" + }, + { + "bbox": [ + 315, + 125, + 498, + 137 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 125, + 498, + 137 + ], + "type": "text", + "content": " (m202273680@hust.edu.cn)," + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 315, + 137, + 444, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 137, + 444, + 148 + ], + "spans": [ + { + "bbox": [ + 315, + 137, + 444, + 148 + ], + "type": "text", + "content": "Ran Ma1 (ranma@hust.edu.cn)," + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 315, + 149, + 468, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 149, + 468, + 161 + ], + "spans": [ + { + "bbox": [ + 315, + 149, + 468, + 161 + ], + "type": "text", + "content": "Yuhan Liu1 (yuhan.liu@hust.edu.cn)," + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 315, + 162, + 481, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 162, + 481, + 173 + ], + "spans": [ + { + "bbox": [ + 315, + 162, + 481, + 173 + ], + "type": "text", + "content": "Zijian Zhuang" + }, + { + "bbox": [ + 315, + 162, + 481, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 162, + 481, + 173 + ], + "type": "text", + "content": " (zhuangzj@hust.edu.cn)," + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 315, + 173, + 451, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 173, + 451, + 185 + ], + 
"spans": [ + { + "bbox": [ + 315, + 173, + 451, + 185 + ], + "type": "text", + "content": "Shuai Yi" + }, + { + "bbox": [ + 315, + 173, + 451, + 185 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 173, + 451, + 185 + ], + "type": "text", + "content": " (yishuai@hust.edu.cn)," + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 315, + 186, + 471, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 186, + 471, + 197 + ], + "spans": [ + { + "bbox": [ + 315, + 186, + 471, + 197 + ], + "type": "text", + "content": "Yixiong Zou1 (yixiongz@hust.edu.cn)" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 315, + 198, + 365, + 208 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 198, + 365, + 208 + ], + "spans": [ + { + "bbox": [ + 315, + 198, + 365, + 208 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 314, + 209, + 553, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 209, + 553, + 233 + ], + "spans": [ + { + "bbox": [ + 314, + 209, + 553, + 233 + ], + "type": "text", + "content": "1 School of Computer Science and Technology, Huazhong University of Science and Technology" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 315, + 251, + 418, + 263 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 251, + 418, + 263 + ], + "spans": [ + { + "bbox": [ + 315, + 251, + 418, + 263 + ], + "type": "text", + "content": "FDUROILab_Lenovo" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 314, + 269, + 553, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 269, + 553, + 293 + ], + "spans": [ + { + "bbox": [ + 314, + 269, + 553, + 293 + ], + "type": "text", + "content": "Title: Efficient Tuning and MLLM-Based Post Prcessing for CDFSOD" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 315, + 293, + 358, + 304 + ], + "type": "title", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 315, + 293, + 358, + 304 + ], + "spans": [ + { + "bbox": [ + 315, + 293, + 358, + 304 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 315, + 305, + 493, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 305, + 493, + 317 + ], + "spans": [ + { + "bbox": [ + 315, + 305, + 493, + 317 + ], + "type": "text", + "content": "Lingyi Hong1 (lyhong22@m.fudan.edu.cn)," + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 315, + 318, + 501, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 318, + 501, + 329 + ], + "spans": [ + { + "bbox": [ + 315, + 318, + 501, + 329 + ], + "type": "text", + "content": "Mingxi Cheng1(mxchen24@m.fudan.edu.cn)," + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 315, + 330, + 441, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 330, + 441, + 341 + ], + "spans": [ + { + "bbox": [ + 315, + 330, + 441, + 341 + ], + "type": "text", + "content": "Runze Li" + }, + { + "bbox": [ + 315, + 330, + 441, + 341 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 330, + 441, + 341 + ], + "type": "text", + "content": "(lirz7@lenovo.com)," + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 315, + 342, + 492, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 342, + 492, + 353 + ], + "spans": [ + { + "bbox": [ + 315, + 342, + 492, + 353 + ], + "type": "text", + "content": "Xingdong Sheng" + }, + { + "bbox": [ + 315, + 342, + 492, + 353 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 342, + 492, + 353 + ], + "type": "text", + "content": "(shengxd1@lenovo.com)," + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 315, + 354, + 499, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 354, + 499, + 365 + ], + "spans": [ + { + "bbox": [ + 315, + 
354, + 499, + 365 + ], + "type": "text", + "content": "Wenqiang Zhang" + }, + { + "bbox": [ + 315, + 354, + 499, + 365 + ], + "type": "inline_equation", + "content": "^{1,3}" + }, + { + "bbox": [ + 315, + 354, + 499, + 365 + ], + "type": "text", + "content": " (wqzhang@fudan.edu.cn)" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 315, + 366, + 365, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 366, + 365, + 376 + ], + "spans": [ + { + "bbox": [ + 315, + 366, + 365, + 376 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 314, + 377, + 553, + 449 + ], + "type": "list", + "angle": 0, + "index": 67, + "blocks": [ + { + "bbox": [ + 315, + 377, + 553, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 377, + 553, + 400 + ], + "spans": [ + { + "bbox": [ + 315, + 377, + 553, + 400 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 377, + 553, + 400 + ], + "type": "text", + "content": " Shanghai Key Lab of Intelligent Information Processing, School of Computer Science, Fudan University" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 315, + 401, + 393, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 401, + 393, + 411 + ], + "spans": [ + { + "bbox": [ + 315, + 401, + 393, + 411 + ], + "type": "text", + "content": "2 Lenovo Research" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 314, + 413, + 553, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 413, + 553, + 449 + ], + "spans": [ + { + "bbox": [ + 314, + 413, + 553, + 449 + ], + "type": "text", + "content": "3 Engineering Research Center of AI & Robotics, Ministry of Education, Academy for Engineering & Technology, Fudan University" + } + ] + } + ], + "index": 66 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 468, + 367, + 479 + ], + "type": "title", + "angle": 0, + "lines": 
[ + { + "bbox": [ + 315, + 468, + 367, + 479 + ], + "spans": [ + { + "bbox": [ + 315, + 468, + 367, + 479 + ], + "type": "text", + "content": "HUSTLab" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 314, + 485, + 553, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 485, + 553, + 509 + ], + "spans": [ + { + "bbox": [ + 314, + 485, + 553, + 509 + ], + "type": "text", + "content": "Title: Prompt and Finetune Grounding DINO for Cross-Domain Few-shot Object Detection" + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 315, + 510, + 358, + 520 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 510, + 358, + 520 + ], + "spans": [ + { + "bbox": [ + 315, + 510, + 358, + 520 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 315, + 521, + 492, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 521, + 492, + 533 + ], + "spans": [ + { + "bbox": [ + 315, + 521, + 492, + 533 + ], + "type": "text", + "content": "Weisen Chen" + }, + { + "bbox": [ + 315, + 521, + 492, + 533 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 521, + 492, + 533 + ], + "type": "text", + "content": " (U202115027@hust.edu.cn)," + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 315, + 534, + 471, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 534, + 471, + 545 + ], + "spans": [ + { + "bbox": [ + 315, + 534, + 471, + 545 + ], + "type": "text", + "content": "Yongxin Yan" + }, + { + "bbox": [ + 315, + 534, + 471, + 545 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 534, + 471, + 545 + ], + "type": "text", + "content": " (2585856499@qq.com)," + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 315, + 545, + 472, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 545, + 472, + 557 + ], + "spans": [ + { + "bbox": [ + 315, + 545, 
+ 472, + 557 + ], + "type": "text", + "content": "Xinguo Chen" + }, + { + "bbox": [ + 315, + 545, + 472, + 557 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 545, + 472, + 557 + ], + "type": "text", + "content": " (327715@whut.edu.cn)," + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 315, + 558, + 485, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 558, + 485, + 569 + ], + "spans": [ + { + "bbox": [ + 315, + 558, + 485, + 569 + ], + "type": "text", + "content": "Yuanjie Shao" + }, + { + "bbox": [ + 315, + 558, + 485, + 569 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 558, + 485, + 569 + ], + "type": "text", + "content": " (shaoyuanjie@hust.edu.cn)," + } + ] + } + ], + "index": 74 + }, + { + "bbox": [ + 315, + 570, + 497, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 570, + 497, + 581 + ], + "spans": [ + { + "bbox": [ + 315, + 570, + 497, + 581 + ], + "type": "text", + "content": "Zhengrong Zuo" + }, + { + "bbox": [ + 315, + 570, + 497, + 581 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 570, + 497, + 581 + ], + "type": "text", + "content": " (zhrzuo@main.hust.edu.cn)," + } + ] + } + ], + "index": 75 + }, + { + "bbox": [ + 315, + 582, + 451, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 582, + 451, + 594 + ], + "spans": [ + { + "bbox": [ + 315, + 582, + 451, + 594 + ], + "type": "text", + "content": "Nong Sang" + }, + { + "bbox": [ + 315, + 582, + 451, + 594 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 582, + 451, + 594 + ], + "type": "text", + "content": " (nsang@hust.edu.cn)" + } + ] + } + ], + "index": 76 + }, + { + "bbox": [ + 315, + 605, + 365, + 616 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 605, + 365, + 616 + ], + "spans": [ + { + "bbox": [ + 315, + 605, + 365, + 616 
+ ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 77 + }, + { + "bbox": [ + 315, + 617, + 553, + 665 + ], + "type": "list", + "angle": 0, + "index": 80, + "blocks": [ + { + "bbox": [ + 315, + 617, + 553, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 617, + 553, + 640 + ], + "spans": [ + { + "bbox": [ + 315, + 617, + 553, + 640 + ], + "type": "text", + "content": "1 School of Artificial Intelligence and Automation, Huazhong University of Science and Technology" + } + ] + } + ], + "index": 78 + }, + { + "bbox": [ + 315, + 641, + 553, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 641, + 553, + 665 + ], + "spans": [ + { + "bbox": [ + 315, + 641, + 553, + 665 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 641, + 553, + 665 + ], + "type": "text", + "content": " School of Information Engineering, Wuhan University of Technology" + } + ] + } + ], + "index": 79 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 672, + 367, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 672, + 367, + 685 + ], + "spans": [ + { + "bbox": [ + 315, + 672, + 367, + 685 + ], + "type": "text", + "content": "TongjiLab" + } + ] + } + ], + "index": 81 + }, + { + "bbox": [ + 314, + 689, + 553, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 689, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 314, + 689, + 553, + 714 + ], + "type": "text", + "content": "Title: ProtoDINO: Cross-Domain Few-Shot Object Detection via GroundingDINO and CLIP-Based Prototypes" + } + ] + } + ], + "index": 82 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 83 + } + 
], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 100, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 100, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 100, + 83 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 84, + 195, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 84, + 195, + 95 + ], + "spans": [ + { + "bbox": [ + 56, + 84, + 195, + 95 + ], + "type": "text", + "content": "Hao " + }, + { + "bbox": [ + 56, + 84, + 195, + 95 + ], + "type": "inline_equation", + "content": "\\mathbf{W}\\mathbf{u}^{1}" + }, + { + "bbox": [ + 56, + 84, + 195, + 95 + ], + "type": "text", + "content": " (haowu@tongji.edu.cn)," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 96, + 110, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 96, + 110, + 106 + ], + "spans": [ + { + "bbox": [ + 56, + 96, + 110, + 106 + ], + "type": "text", + "content": "Haoran Sun" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 108, + 107, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 108, + 107, + 119 + ], + "spans": [ + { + "bbox": [ + 56, + 108, + 107, + 119 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 57, + 120, + 135, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 120, + 135, + 132 + ], + "spans": [ + { + "bbox": [ + 57, + 120, + 135, + 132 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 57, + 120, + 135, + 132 + ], + "type": "text", + "content": " Tongji University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 152, + 102, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 152, + 102, + 163 + ], + "spans": [ + { + "bbox": [ + 56, + 152, + 102, + 163 + ], + 
"type": "text", + "content": "Manifold" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 169, + 295, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 169, + 295, + 194 + ], + "spans": [ + { + "bbox": [ + 56, + 169, + 295, + 194 + ], + "type": "text", + "content": "Title: CDFSOD Challenge: Using Grounding-DINO Proposals and ResNet Embeddings" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 194, + 99, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 194, + 99, + 204 + ], + "spans": [ + { + "bbox": [ + 56, + 194, + 99, + 204 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 205, + 212, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 205, + 212, + 217 + ], + "spans": [ + { + "bbox": [ + 56, + 205, + 212, + 217 + ], + "type": "text", + "content": "Shuming Hu1 (hsm123@nudt.edu.cn)," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 217, + 109, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 217, + 109, + 229 + ], + "spans": [ + { + "bbox": [ + 56, + 217, + 109, + 229 + ], + "type": "text", + "content": "Yan Zhang1," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 230, + 119, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 230, + 119, + 241 + ], + "spans": [ + { + "bbox": [ + 56, + 230, + 119, + 241 + ], + "type": "text", + "content": "Zhiguang Shi1," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 242, + 105, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 242, + 105, + 253 + ], + "spans": [ + { + "bbox": [ + 56, + 242, + 105, + 253 + ], + "type": "text", + "content": "Yu Zhang1," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 254, + 110, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 254, + 110, + 265 + ], + "spans": [ + { + 
"bbox": [ + 56, + 254, + 110, + 265 + ], + "type": "text", + "content": "Chao Chen1," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 266, + 100, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 266, + 100, + 277 + ], + "spans": [ + { + "bbox": [ + 56, + 266, + 100, + 277 + ], + "type": "text", + "content": "Tao Wang" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 278, + 107, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 278, + 107, + 289 + ], + "spans": [ + { + "bbox": [ + 56, + 278, + 107, + 289 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 57, + 289, + 239, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 289, + 239, + 301 + ], + "spans": [ + { + "bbox": [ + 57, + 289, + 239, + 301 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 57, + 289, + 239, + 301 + ], + "type": "text", + "content": " National University of Defense Technology" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 321, + 84, + 332 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 321, + 84, + 332 + ], + "spans": [ + { + "bbox": [ + 56, + 321, + 84, + 332 + ], + "type": "text", + "content": "MXT" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 339, + 295, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 339, + 295, + 363 + ], + "spans": [ + { + "bbox": [ + 56, + 339, + 295, + 363 + ], + "type": "text", + "content": "Title: Domain Adaptation Enhancement Module (DAEM) for Cross-Domain Few-Shot Object Detection" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 56, + 364, + 99, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 364, + 99, + 373 + ], + "spans": [ + { + "bbox": [ + 56, + 364, + 99, + 373 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 18 + }, 
+ { + "bbox": [ + 56, + 375, + 204, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 375, + 204, + 386 + ], + "spans": [ + { + "bbox": [ + 56, + 375, + 204, + 386 + ], + "type": "text", + "content": "Da Feng" + }, + { + "bbox": [ + 56, + 375, + 204, + 386 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 375, + 204, + 386 + ], + "type": "text", + "content": " (072108208@fzu.edu.cn)," + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 56, + 387, + 208, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 387, + 208, + 399 + ], + "spans": [ + { + "bbox": [ + 56, + 387, + 208, + 399 + ], + "type": "text", + "content": "Linhai Zhuo" + }, + { + "bbox": [ + 56, + 387, + 208, + 399 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 387, + 208, + 399 + ], + "type": "text", + "content": " (534537916@qq.com)," + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 56, + 399, + 108, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 399, + 108, + 411 + ], + "spans": [ + { + "bbox": [ + 56, + 399, + 108, + 411 + ], + "type": "text", + "content": "Ziming Lin" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 56, + 411, + 107, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 411, + 107, + 422 + ], + "spans": [ + { + "bbox": [ + 56, + 411, + 107, + 422 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 57, + 423, + 139, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 423, + 139, + 435 + ], + "spans": [ + { + "bbox": [ + 57, + 423, + 139, + 435 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 57, + 423, + 139, + 435 + ], + "type": "text", + "content": " Fuzhou University" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 56, + 455, + 89, + 465 + ], + "type": "title", + "angle": 
0, + "lines": [ + { + "bbox": [ + 56, + 455, + 89, + 465 + ], + "spans": [ + { + "bbox": [ + 56, + 455, + 89, + 465 + ], + "type": "text", + "content": "X-Few" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 56, + 472, + 295, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 472, + 295, + 495 + ], + "spans": [ + { + "bbox": [ + 56, + 472, + 295, + 495 + ], + "type": "text", + "content": "Title: IFC: Instance Feature Caching for Cross-Domain Few-Shot Object Detection" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 56, + 497, + 99, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 497, + 99, + 506 + ], + "spans": [ + { + "bbox": [ + 56, + 497, + 99, + 506 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 56, + 508, + 216, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 508, + 216, + 520 + ], + "spans": [ + { + "bbox": [ + 56, + 508, + 216, + 520 + ], + "type": "text", + "content": "Yali Huang" + }, + { + "bbox": [ + 56, + 508, + 216, + 520 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 508, + 216, + 520 + ], + "type": "text", + "content": " (hyl2024@gs.zzu.edu.cn)," + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 56, + 521, + 208, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 521, + 208, + 533 + ], + "spans": [ + { + "bbox": [ + 56, + 521, + 208, + 533 + ], + "type": "text", + "content": "Jie Mei" + }, + { + "bbox": [ + 56, + 521, + 208, + 533 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 521, + 208, + 533 + ], + "type": "text", + "content": " (mj123123@gs.zzu.edu.cn)," + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 56, + 533, + 228, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 533, + 228, + 544 + ], + "spans": [ + { + "bbox": [ + 56, + 533, + 228, + 544 + ], + 
"type": "text", + "content": "Yiming Yang1 (yangyim637@gmail.com)," + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 56, + 544, + 235, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 544, + 235, + 555 + ], + "spans": [ + { + "bbox": [ + 56, + 544, + 235, + 555 + ], + "type": "text", + "content": "Mi Guo" + }, + { + "bbox": [ + 56, + 544, + 235, + 555 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 544, + 235, + 555 + ], + "type": "text", + "content": " (mimi987836724@gs.zzu.edu.cn)," + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 56, + 555, + 224, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 555, + 224, + 568 + ], + "spans": [ + { + "bbox": [ + 56, + 555, + 224, + 568 + ], + "type": "text", + "content": "Mingyuan Jiu" + }, + { + "bbox": [ + 56, + 555, + 224, + 568 + ], + "type": "inline_equation", + "content": "^{1,2,3}" + }, + { + "bbox": [ + 56, + 555, + 224, + 568 + ], + "type": "text", + "content": " (iemyjiu@zzu.edu.cn)," + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 56, + 568, + 250, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 568, + 250, + 580 + ], + "spans": [ + { + "bbox": [ + 56, + 568, + 250, + 580 + ], + "type": "text", + "content": "Mingliang Xu" + }, + { + "bbox": [ + 56, + 568, + 250, + 580 + ], + "type": "inline_equation", + "content": "^{1,2,3}" + }, + { + "bbox": [ + 56, + 568, + 250, + 580 + ], + "type": "text", + "content": " (iexumingliang@zzu.edu.cn)" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 56, + 581, + 107, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 581, + 107, + 591 + ], + "spans": [ + { + "bbox": [ + 56, + 581, + 107, + 591 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 56, + 592, + 295, + 652 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 
56, + 592, + 295, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 592, + 295, + 616 + ], + "spans": [ + { + "bbox": [ + 56, + 592, + 295, + 616 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 592, + 295, + 616 + ], + "type": "text", + "content": " School of Computer and Artificial Intelligence, Zhengzhou University" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 56, + 616, + 295, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 616, + 295, + 640 + ], + "spans": [ + { + "bbox": [ + 56, + 616, + 295, + 640 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 56, + 616, + 295, + 640 + ], + "type": "text", + "content": " Engineering Research Center of Intelligent Swarm Systems, Ministry of Education, Zhengzhou University" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 56, + 640, + 257, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 640, + 257, + 652 + ], + "spans": [ + { + "bbox": [ + 56, + 640, + 257, + 652 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 56, + 640, + 257, + 652 + ], + "type": "text", + "content": " National SuperComputing Center in Zhengzhou" + } + ] + } + ], + "index": 36 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 56, + 671, + 80, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 671, + 80, + 682 + ], + "spans": [ + { + "bbox": [ + 56, + 671, + 80, + 682 + ], + "type": "text", + "content": "MM" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "content": "Title: DFE-ViT: Dual Feature Enhancement Network for Cross-Domain Few-Shot Object Detection." 
+ } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 314, + 72, + 358, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 358, + 83 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 358, + 83 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 314, + 84, + 513, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 84, + 513, + 96 + ], + "spans": [ + { + "bbox": [ + 314, + 84, + 513, + 96 + ], + "type": "text", + "content": "Maomao Xiong" + }, + { + "bbox": [ + 314, + 84, + 513, + 96 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 314, + 84, + 513, + 96 + ], + "type": "text", + "content": " (202314866@mail.sdu.edu.cn)," + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 314, + 97, + 506, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 97, + 506, + 108 + ], + "spans": [ + { + "bbox": [ + 314, + 97, + 506, + 108 + ], + "type": "text", + "content": "Qunshu Zhang" + }, + { + "bbox": [ + 314, + 97, + 506, + 108 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 314, + 97, + 506, + 108 + ], + "type": "text", + "content": " (202414859@mail.sdu.edu.cn)," + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 314, + 109, + 488, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 109, + 488, + 120 + ], + "spans": [ + { + "bbox": [ + 314, + 109, + 488, + 120 + ], + "type": "text", + "content": "Xinyu Cao" + }, + { + "bbox": [ + 314, + 109, + 488, + 120 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 314, + 109, + 488, + 120 + ], + "type": "text", + "content": " (202414842@mail.sdu.edu.cn)" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 315, + 121, + 366, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 121, + 366, + 131 + ], + "spans": [ + { + "bbox": [ + 315, + 121, + 366, + 131 + ], 
+ "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 315, + 132, + 408, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 132, + 408, + 144 + ], + "spans": [ + { + "bbox": [ + 315, + 132, + 408, + 144 + ], + "type": "text", + "content": "1 Shandong University" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 315, + 163, + 338, + 174 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 163, + 338, + 174 + ], + "spans": [ + { + "bbox": [ + 315, + 163, + 338, + 174 + ], + "type": "text", + "content": "FSV" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 314, + 180, + 553, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 180, + 553, + 204 + ], + "spans": [ + { + "bbox": [ + 314, + 180, + 553, + 204 + ], + "type": "text", + "content": "Title: Enhanced Prototype-based Cross-domain Few-shot Object Detection" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 315, + 205, + 358, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 205, + 358, + 215 + ], + "spans": [ + { + "bbox": [ + 315, + 205, + 358, + 215 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 315, + 217, + 484, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 217, + 484, + 228 + ], + "spans": [ + { + "bbox": [ + 315, + 217, + 484, + 228 + ], + "type": "text", + "content": "Yuqing Yang1 (yyqyang101@gmail.com)" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 315, + 229, + 365, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 229, + 365, + 240 + ], + "spans": [ + { + "bbox": [ + 315, + 229, + 365, + 240 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 315, + 240, + 427, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 240, + 427, + 
253 + ], + "spans": [ + { + "bbox": [ + 315, + 240, + 427, + 253 + ], + "type": "text", + "content": "1 George Mason University" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 315, + 272, + 337, + 282 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 272, + 337, + 282 + ], + "spans": [ + { + "bbox": [ + 315, + 272, + 337, + 282 + ], + "type": "text", + "content": "IPC" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 314, + 289, + 553, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 289, + 553, + 312 + ], + "spans": [ + { + "bbox": [ + 314, + 289, + 553, + 312 + ], + "type": "text", + "content": "Title: Test-time Adaptation Strategy for Cross-Domain Few-Shot Object Detection" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 315, + 314, + 358, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 314, + 358, + 323 + ], + "spans": [ + { + "bbox": [ + 315, + 314, + 358, + 323 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 315, + 325, + 503, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 325, + 503, + 337 + ], + "spans": [ + { + "bbox": [ + 315, + 325, + 503, + 337 + ], + "type": "text", + "content": "Dianmo Sheng" + }, + { + "bbox": [ + 315, + 325, + 503, + 337 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 325, + 503, + 337 + ], + "type": "text", + "content": " (dmsheng@mail.ustc.edu.cn)," + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 315, + 337, + 378, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 337, + 378, + 349 + ], + "spans": [ + { + "bbox": [ + 315, + 337, + 378, + 349 + ], + "type": "text", + "content": "Xuanpu Zhao1," + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 315, + 349, + 359, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 349, + 359, + 361 + ], 
+ "spans": [ + { + "bbox": [ + 315, + 349, + 359, + 361 + ], + "type": "text", + "content": "Zhiyu Li1," + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 315, + 361, + 375, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 361, + 375, + 373 + ], + "spans": [ + { + "bbox": [ + 315, + 361, + 375, + 373 + ], + "type": "text", + "content": "Xuyang Ding" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 315, + 373, + 365, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 373, + 365, + 384 + ], + "spans": [ + { + "bbox": [ + 315, + 373, + 365, + 384 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 315, + 385, + 514, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 385, + 514, + 397 + ], + "spans": [ + { + "bbox": [ + 315, + 385, + 514, + 397 + ], + "type": "text", + "content": "1 University of Science and Technology of China" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 315, + 416, + 337, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 416, + 337, + 428 + ], + "spans": [ + { + "bbox": [ + 315, + 416, + 337, + 428 + ], + "type": "text", + "content": "LJY" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 314, + 434, + 553, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 434, + 553, + 457 + ], + "spans": [ + { + "bbox": [ + 314, + 434, + 553, + 457 + ], + "type": "text", + "content": "Title: Similarity-Calibrated Prototype Refinement for Cross-Domain Few-Shot Object Detection" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 315, + 459, + 358, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 459, + 358, + 468 + ], + "spans": [ + { + "bbox": [ + 315, + 459, + 358, + 468 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 315, + 470, + 471, + 482 + ], + 
"type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 470, + 471, + 482 + ], + "spans": [ + { + "bbox": [ + 315, + 470, + 471, + 482 + ], + "type": "text", + "content": "Wenqian Li (wenqianli.li@seu.edu.cn)" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 315, + 483, + 365, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 483, + 365, + 494 + ], + "spans": [ + { + "bbox": [ + 315, + 483, + 365, + 494 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 315, + 495, + 400, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 495, + 400, + 506 + ], + "spans": [ + { + "bbox": [ + 315, + 495, + 400, + 506 + ], + "type": "text", + "content": "Southeast University" + } + ] + } + ], + "index": 66 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 67 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 115, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 115, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 115, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 91, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 61, + 91, + 296, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 91, + 296, + 134 + ], + "spans": [ + { + "bbox": [ + 61, + 91, + 296, + 134 + ], + "type": "text", + "content": "[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. 
arXiv preprint arXiv:2502.13923, 2025.8, 9" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 135, + 296, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 135, + 296, + 168 + ], + "spans": [ + { + "bbox": [ + 61, + 135, + 296, + 168 + ], + "type": "text", + "content": "[2] Weilin Cai, Juyong Jiang, Fan Wang, Jing Tang, Sunghun Kim, and Jiayi Huang. A survey on mixture of experts. arXiv preprint arXiv:2407.06204, 2024. 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 170, + 296, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 170, + 296, + 233 + ], + "spans": [ + { + "bbox": [ + 62, + 170, + 296, + 233 + ], + "type": "text", + "content": "[3] Zheng Chen, Kai Liu, Jue Gong, Jingkai Wang, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. Ntire 2025 challenge on image super-resolution " + }, + { + "bbox": [ + 62, + 170, + 296, + 233 + ], + "type": "inline_equation", + "content": "(\\times 4)" + }, + { + "bbox": [ + 62, + 170, + 296, + 233 + ], + "type": "text", + "content": ": Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 236, + 296, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 236, + 296, + 300 + ], + "spans": [ + { + "bbox": [ + 62, + 236, + 296, + 300 + ], + "type": "text", + "content": "[4] Zheng Chen, Jingkai Wang, Kai Liu, Jue Gong, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. Ntire 2025 challenge on real-world face restoration: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 302, + 296, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 302, + 296, + 346 + ], + "spans": [ + { + "bbox": [ + 62, + 302, + 296, + 346 + ], + "type": "text", + "content": "[5] Marcos Conde, Radu Timofte, et al. Ntire 2025 challenge on raw image restoration and super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 346, + 296, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 346, + 296, + 391 + ], + "spans": [ + { + "bbox": [ + 62, + 346, + 296, + 391 + ], + "type": "text", + "content": "[6] Marcos Conde, Radu Timofte, et al. Raw image reconstruction from RGB on smartphones. ntire 2025 challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 392, + 296, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 392, + 296, + 468 + ], + "spans": [ + { + "bbox": [ + 62, + 392, + 296, + 468 + ], + "type": "text", + "content": "[7] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies, volume 1 (long and short papers), pages 4171–4186, 2019. 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 469, + 296, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 469, + 296, + 501 + ], + "spans": [ + { + "bbox": [ + 62, + 469, + 296, + 501 + ], + "type": "text", + "content": "[8] Geir Drange. Arthropod taxonomy orders object detection dataset. 
In https://doi.org/10.34740/kaggle/dsv/1240192, 2019.2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 502, + 296, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 502, + 296, + 590 + ], + "spans": [ + { + "bbox": [ + 62, + 502, + 296, + 590 + ], + "type": "text", + "content": "[9] Egor Ershov, Sergey Korchagin, Alexei Khalin, Artyom Panshin, Arseniy Terekhin, Ekaterina Zaychenkova, Georgiy Lobarev, Vsevolod Plokhotnyuk, Denis Abramov, Elisey Zhdanov, Sofia Dorogova, Yasin Mamedov, Nikola Banic, Georgii Perevozhikov, Radu Timofte, et al. Ntire 2025 challenge on night photography rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 591, + 295, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 591, + 295, + 634 + ], + "spans": [ + { + "bbox": [ + 57, + 591, + 295, + 634 + ], + "type": "text", + "content": "[10] William Fedus, Barret Zoph, and Noam Shazeer. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. Journal of Machine Learning Research, 23(120):1-39, 2022. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 635, + 295, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 635, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 57, + 635, + 295, + 689 + ], + "type": "text", + "content": "[11] Shenghao Fu, Qize Yang, Qijie Mo, Junkai Yan, Xihan Wei, Jingke Meng, Xiaohua Xie, and Wei-Shi Zheng. Llmdet: Learning strong open-vocabulary object detectors under the supervision of large language models. arXiv preprint arXiv:2501.18954, 2025. 
8, 9" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 691, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 691, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 57, + 691, + 295, + 713 + ], + "type": "text", + "content": "[12] Yuqian Fu, Yanwei Fu, and Yu-Gang Jiang. Meta-fdmixup: Cross-domain few-shot learning guided by labeled target" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 712 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "text", + "content": "data. In Proceedings of the 29th ACM international conference on multimedia, pages 5326-5334, 2021. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 96, + 555, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 96, + 555, + 148 + ], + "spans": [ + { + "bbox": [ + 316, + 96, + 555, + 148 + ], + "type": "text", + "content": "[13] Yuqian Fu, Yu Xie, Yanwei Fu, Jingjing Chen, and Yu-Gang Jiang. Me-d2n: Multi-expert domain decompositional network for cross-domain few-shot learning. In Proceedings of the 30th ACM international conference on multimedia, pages 6609-6617, 2022." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 150, + 553, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 150, + 553, + 183 + ], + "spans": [ + { + "bbox": [ + 316, + 150, + 553, + 183 + ], + "type": "text", + "content": "[14] Yuqian Fu, Yu Xie, Yanwei Fu, and Yu-Gang Jiang. Styleadv: Meta style adversarial training for cross-domain few-shot learning. In CVPR, 2023. 
1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 184, + 553, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 184, + 553, + 248 + ], + "spans": [ + { + "bbox": [ + 317, + 184, + 553, + 248 + ], + "type": "text", + "content": "[15] Yuqian Fu, Yu Wang, Yixuan Pan, Lian Huai, Xingyu Qiu, Zeyu Shangguan, Tong Liu, Yanwei Fu, Luc Van Gool, and Xingqun Jiang. Cross-domain few-shot object detection via enhanced open-set object detector. In European Conference on Computer Vision, pages 247-264. Springer, 2024. 1, 2, 4, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 250, + 553, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 250, + 553, + 316 + ], + "spans": [ + { + "bbox": [ + 316, + 250, + 553, + 316 + ], + "type": "text", + "content": "[16] Yuqian Fu, Xingyu Qiu, Bin Ren Yanwei Fu, Radu Timofte, Nicu Sebe, Ming-Hsuan Yang, Luc Van Gool, et al. Ntire 2025 challenge on cross-domain few-shot object detection: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 316, + 553, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 316, + 553, + 380 + ], + "spans": [ + { + "bbox": [ + 317, + 316, + 553, + 380 + ], + "type": "text", + "content": "[17] Golnaz Ghiasi, Yin Cui, Aravind Srinivas, Rui Qian, Tsung-Yi Lin, Ekin D Cubuk, Quoc V Le, and Barret Zoph. Simple copy-paste is a strong data augmentation method for instance segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2918-2928, 2021. 
9" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 383, + 553, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 383, + 553, + 449 + ], + "spans": [ + { + "bbox": [ + 317, + 383, + 553, + 449 + ], + "type": "text", + "content": "[18] Yunhui Guo, Noel C Codella, Leonid Karlinsky, James V Codella, John R Smith, Kate Saenko, Tajana Rosing, and Rogerio Feris. A broader study of cross-domain few-shot learning. In Computer vision-ECCV 2020: 16th European conference, Glasgow, UK, August 23-28, 2020, proceedings, part XXVII 16, pages 124-141. Springer, 2020. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 449, + 553, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 449, + 553, + 513 + ], + "spans": [ + { + "bbox": [ + 317, + 449, + 553, + 513 + ], + "type": "text", + "content": "[19] Shuhao Han, Haotian Fan, Fangyuan Kong, Wenjie Liao, Chunle Guo, Chongyi Li, Radu Timofte, et al. Ntire 2025 challenge on text to image generation model quality assessment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 514, + 553, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 514, + 553, + 568 + ], + "spans": [ + { + "bbox": [ + 317, + 514, + 553, + 568 + ], + "type": "text", + "content": "[20] Meng-Ru Hsieh, Yen-Liang Lin, and Winston H Hsu. Drone-based object counting by spatially regularized regional proposal network. In Proceedings of the IEEE international conference on computer vision, pages 4145-4153, 2017. 
1, 2, 8, 9, 13" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 570, + 553, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 570, + 553, + 613 + ], + "spans": [ + { + "bbox": [ + 317, + 570, + 553, + 613 + ], + "type": "text", + "content": "[21] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. *ICLR*, 1(2):3, 2022. 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 614, + 553, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 614, + 553, + 669 + ], + "spans": [ + { + "bbox": [ + 317, + 614, + 553, + 669 + ], + "type": "text", + "content": "[22] Gabriel Ilharco, Mitchell Wortsman, Ross Wightman, Cade Gordon, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, Hongseok Namkoong, John Miller, Hannaneh Hajishirzi, Ali Farhadi, and Ludwig Schmidt. Openclip, 2021. 10" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 670, + 553, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 670, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 316, + 670, + 553, + 712 + ], + "type": "text", + "content": "[23] Naoto Inoue, Ryosuke Furuta, Toshihiko Yamasaki, and Kiyoharu Aizawa. Cross-domain weakly-supervised object detection through progressive domain adaptation. In CVPR, 2018. 
2" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 297, + 715 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 56, + 73, + 297, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 297, + 139 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 297, + 139 + ], + "type": "text", + "content": "[24] Varun Jain, Zongwei Wu, Quan Zou, Louis Florentin, Henrik Turbell, Sandeep Siddhartha, Radu Timofte, et al. Ntire 2025 challenge on video quality enhancement for video conferencing: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 140, + 295, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 140, + 295, + 194 + ], + "spans": [ + { + "bbox": [ + 56, + 140, + 295, + 194 + ], + "type": "text", + "content": "[25] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024. 
5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 195, + 295, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 195, + 295, + 239 + ], + "spans": [ + { + "bbox": [ + 56, + 195, + 295, + 239 + ], + "type": "text", + "content": "[26] Lihao Jiang, Yi Wang, Qi Jia, Shengwei Xu, Yu Liu, Xin Fan, Haojie Li, Risheng Liu, Xinwei Xue, and Ruili Wang. Underwater species detection using channel sharpening attention. In ACM MM, 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 239, + 295, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 239, + 295, + 293 + ], + "spans": [ + { + "bbox": [ + 56, + 239, + 295, + 293 + ], + "type": "text", + "content": "[27] Aishwarya Kamath, Mannat Singh, Yann LeCun, Gabriel Synnaeve, Ishan Misra, and Nicolas Carion. Mdetr-modulated detection for end-to-end multi-modal understanding. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1780-1790, 2021. 7, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 294, + 295, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 294, + 295, + 337 + ], + "spans": [ + { + "bbox": [ + 56, + 294, + 295, + 337 + ], + "type": "text", + "content": "[28] Mona Köhler, Markus Eisenbach, and Horst-Michael Gross. Few-shot object detection: A comprehensive survey. IEEE Transactions on Neural Networks and Learning Systems, 2023. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 338, + 295, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 338, + 295, + 403 + ], + "spans": [ + { + "bbox": [ + 56, + 338, + 295, + 403 + ], + "type": "text", + "content": "[29] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. 
International journal of computer vision, 123:32-73, 2017. 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 404, + 295, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 404, + 295, + 449 + ], + "spans": [ + { + "bbox": [ + 56, + 404, + 295, + 449 + ], + "type": "text", + "content": "[30] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In Advances in Neural Information Processing Systems, pages 1097-1105, 2012." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 449, + 295, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 449, + 295, + 525 + ], + "spans": [ + { + "bbox": [ + 56, + 449, + 295, + 525 + ], + "type": "text", + "content": "[31] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, et al. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. International journal of computer vision, 128(7):1956-1981, 2020. 7, 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 526, + 295, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 526, + 295, + 592 + ], + "spans": [ + { + "bbox": [ + 56, + 526, + 295, + 592 + ], + "type": "text", + "content": "[32] Sangmin Lee, Eunpil Park, Angel Canelo, Hyunhee Park, Youngjo Kim, Hyungju Chun, Xin Jin, Chongyi Li, Chun-Le Guo, Radu Timofte, et al. Ntire 2025 challenge on efficient burst hdr and restoration: Datasets, methods, and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 592, + 295, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 592, + 295, + 656 + ], + "spans": [ + { + "bbox": [ + 56, + 592, + 295, + 656 + ], + "type": "text", + "content": "[33] Chunyuan Li, Haotian Liu, Liunian Li, Pengchuan Zhang, Jyoti Aneja, Jianwei Yang, Ping Jin, Houdong Hu, Zicheng Liu, Yong Jae Lee, et al. Elevater: A benchmark and toolkit for evaluating language-augmented visual models. Advances in Neural Information Processing Systems, 35:9287-9301, 2022. 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 658, + 295, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 658, + 295, + 691 + ], + "spans": [ + { + "bbox": [ + 56, + 658, + 295, + 691 + ], + "type": "text", + "content": "[34] Ke Li, Gang Wan, Gong Cheng, Liqui Meng, and Junwei Han. Object detection in optical remote sensing images: A survey and a new benchmark. ISPRS, 2020. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 692, + 295, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 692, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 56, + 692, + 295, + 715 + ], + "type": "text", + "content": "[35] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 118 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 118 + ], + "type": "text", + "content": "Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language-image pre-training. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10965-10975, 2022. 
4, 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 119, + 553, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 119, + 553, + 163 + ], + "spans": [ + { + "bbox": [ + 316, + 119, + 553, + 163 + ], + "type": "text", + "content": "[36] Wei-Hong Li, Xialei Liu, and Hakan Bilen. Cross-domain few-shot learning with task-specific adapters. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7161-7170, 2022. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 164, + 553, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 164, + 553, + 241 + ], + "spans": [ + { + "bbox": [ + 316, + 164, + 553, + 241 + ], + "type": "text", + "content": "[37] Xin Li, Yeying Jin, Xin Jin, Zongwei Wu, Bingchen Li, Yufei Wang, Wenhan Yang, Yu Li, Zhibo Chen, Bihan Wen, Robby Tan, Radu Timofte, et al. Ntire 2025 challenge on day and night raindrop removal for dual-focused images: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 243, + 553, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 243, + 553, + 319 + ], + "spans": [ + { + "bbox": [ + 316, + 243, + 553, + 319 + ], + "type": "text", + "content": "[38] Xin Li, Xijun Wang, Bingchen Li, Kun Yuan, Yizhen Shao, Suhang Yao, Ming Sun, Chao Zhou, Radu Timofte, and Zhibo Chen. Ntire 2025 challenge on short-formUGC video quality assessment and enhancement: Kwaisr dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 321, + 553, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 321, + 553, + 398 + ], + "spans": [ + { + "bbox": [ + 316, + 321, + 553, + 398 + ], + "type": "text", + "content": "[39] Xin Li, Kun Yuan, Bingchen Li, Fengbin Guan, Yizhen Shao, Zihao Yu, Xijun Wang, Yiting Lu, Wei Luo, Suhang Yao, Ming Sun, Chao Zhou, Zhibo Chen, Radu Timofte, et al. Ntire 2025 challenge on short-formUGC video quality assessment and enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 399, + 553, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 399, + 553, + 465 + ], + "spans": [ + { + "bbox": [ + 316, + 399, + 553, + 465 + ], + "type": "text", + "content": "[40] Jie Liang, Radu Timofte, Qiaosi Yi, Zhengqiang Zhang, Shuaizheng Liu, Lingchen Sun, Rongyuan Wu, Xindong Zhang, Hui Zeng, Lei Zhang, et al. Ntire 2025 the 2nd restore any image model (RAIM) in the wild challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 467, + 553, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 467, + 553, + 533 + ], + "spans": [ + { + "bbox": [ + 316, + 467, + 553, + 533 + ], + "type": "text", + "content": "[41] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer vision-ECCV 2014: 13th European conference, zurich, Switzerland, September 6-12, 2014, proceedings, part v 13, pages 740-755. Springer, 2014. 
1, 2, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 535, + 553, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 535, + 553, + 589 + ], + "spans": [ + { + "bbox": [ + 316, + 535, + 553, + 589 + ], + "type": "text", + "content": "[42] Aixin Liu, Bei Feng, Bin Wang, Bingxuan Wang, Bo Liu, Chenggang Zhao, Chengqi Dengr, Chong Ruan, Damai Dai, Daya Guo, et al. Deepseek-v2: A strong, economical, and efficient mixture-of-experts language model. arXiv preprint arXiv:2405.04434, 2024. 5" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 591, + 553, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 591, + 553, + 657 + ], + "spans": [ + { + "bbox": [ + 316, + 591, + 553, + 657 + ], + "type": "text", + "content": "[43] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Qing Jiang, Chunyuan Li, Jianwei Yang, Hang Su, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. In European Conference on Computer Vision, pages 38-55. Springer, 2024. 4, 6, 7, 9, 10" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 658, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 658, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 658, + 553, + 713 + ], + "type": "text", + "content": "[44] Xiaohong Liu, Xiongkuo Min, Qiang Hu, Xiaoyun Zhang, Jie Guo, et al. Ntire 2025 XGC quality assessment challenge: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 294, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 56, + 73, + 294, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 294, + 139 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 294, + 139 + ], + "type": "text", + "content": "[45] Xiaoning Liu, Zongwei Wu, Florin-Alexandru Vasluianu, Hailong Yan, Bin Ren, Yulun Zhang, Shuhang Gu, Le Zhang, Ce Zhu, Radu Timofte, et al. Ntire 2025 challenge on low light image enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 140, + 294, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 140, + 294, + 194 + ], + "spans": [ + { + "bbox": [ + 56, + 140, + 294, + 194 + ], + "type": "text", + "content": "[46] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 7, 9" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 195, + 294, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 195, + 294, + 236 + ], + "spans": [ + { + "bbox": [ + 56, + 195, + 294, + 236 + ], + "type": "text", + "content": "[47] Alexander Neubeck and Luc Van Gool. 
Efficient nonmaximum suppression. In 18th international conference on pattern recognition (ICPR'06), pages 850-855. IEEE, 2006. 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 239, + 294, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 239, + 294, + 293 + ], + "spans": [ + { + "bbox": [ + 56, + 239, + 294, + 293 + ], + "type": "text", + "content": "[48] Maxime Oquab, Timothee Darct, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 14" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 294, + 294, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 294, + 294, + 337 + ], + "spans": [ + { + "bbox": [ + 56, + 294, + 294, + 337 + ], + "type": "text", + "content": "[49] Vicente Ordonez, Girish Kulkarni, and Tamara Berg. Im2text: Describing images using 1 million captioned photographs. Advances in neural information processing systems, 24, 2011. 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 338, + 294, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 338, + 294, + 381 + ], + "spans": [ + { + "bbox": [ + 56, + 338, + 294, + 381 + ], + "type": "text", + "content": "[50] Hongpeng Pan, Shifeng Yi, Shouwei Yang, Lei Qi, Bing Hu, Yi Xu, and Yang Yang. The solution for cvpr2024 foundational few-shot object detection challenge. arXiv preprint arXiv:2406.12225, 2024. 9" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 383, + 294, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 383, + 294, + 435 + ], + "spans": [ + { + "bbox": [ + 56, + 383, + 294, + 435 + ], + "type": "text", + "content": "[51] Jiancheng Pan, Yanxing Liu, Yuqian Fu, Muyuan Ma, Jiaohao Li, Danda Pani Paudel, Luc Van Gool, and Xiaomeng Huang. 
Locate anything on earth: Advancing open-vocabulary object detection for remote sensing community, 2024. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 437, + 294, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 437, + 294, + 470 + ], + "spans": [ + { + "bbox": [ + 56, + 437, + 294, + 470 + ], + "type": "text", + "content": "[52] Jiancheng Pan, Muyuan Ma, Qing Ma, Cong Bai, and Shengyong Chen. Pir: Remote sensing image-text retrieval with prior instruction representation learning, 2024. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 472, + 294, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 472, + 294, + 503 + ], + "spans": [ + { + "bbox": [ + 56, + 472, + 294, + 503 + ], + "type": "text", + "content": "[53] Limeng Qiao, Yuxuan Zhao, Zhiyuan Li, Xi Qiu, Jianan Wu, and Chi Zhang. Defrcn: Decoupled faster r-cnn for few-shot object detection. In ICCV, 2021. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 505, + 294, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 505, + 294, + 568 + ], + "spans": [ + { + "bbox": [ + 56, + 505, + 294, + 568 + ], + "type": "text", + "content": "[54] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 10" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 571, + 294, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 571, + 294, + 624 + ], + "spans": [ + { + "bbox": [ + 56, + 571, + 294, + 624 + ], + "type": "text", + "content": "[55] Bin Ren, Yahui Liu, Yue Song, Wei Bi, Rita Cucchiara, Nicu Sebe, and Wei Wang. Masked jigsaw puzzle: A versatile position embedding for vision transformers. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20382-20391, 2023. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 625, + 294, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 625, + 294, + 679 + ], + "spans": [ + { + "bbox": [ + 56, + 625, + 294, + 679 + ], + "type": "text", + "content": "[56] Bin Ren, Yawei Li, Jingyun Liang, Rakesh Ranjan, Mengyuan Liu, Rita Cucchiara, Luc V Gool, Ming-Hsuan Yang, and Nicu Sebe. Sharing key semantics in transformer makes efficient image restoration. Advances in Neural Information Processing Systems, 37:7427-7463, 2024. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 681, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 681, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 681, + 294, + 713 + ], + "type": "text", + "content": "[57] Bin Ren, Hang Guo, Lei Sun, Zongwei Wu, Radu Timofte, Yawei Li, et al. The tenth nitre 2025 efficient superresolution challenge report. In Proceedings of the IEEE/CVF" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 335, + 73, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 73, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 335, + 73, + 553, + 95 + ], + "type": "text", + "content": "Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 97, + 553, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 97, + 553, + 150 + ], + "spans": [ + { + "bbox": [ + 316, + 97, + 553, + 150 + ], + "type": "text", + "content": "[58] Tianhe Ren, Qing Jiang, Shilong Liu, Zhaoyang Zeng, Wenlong Liu, Han Gao, Hongjie Huang, Zhengyu Ma, Xiaoke Jiang, Yihao Chen, et al. Grounding dino 1.5: Advance the\" edge\" of open-set object detection. arXiv preprint arXiv:2405.10300, 2024. 4" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 152, + 553, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 152, + 553, + 195 + ], + "spans": [ + { + "bbox": [ + 316, + 152, + 553, + 195 + ], + "type": "text", + "content": "[59] Xiaoqian Ruan and Wei Tang. Fully test-time adaptation for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1038-1047, 2024. 15" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 198, + 553, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 198, + 553, + 240 + ], + "spans": [ + { + "bbox": [ + 316, + 198, + 553, + 240 + ], + "type": "text", + "content": "[60] Inkyu Sa, Zongyuan Ge, Feras Dayoub, Ben Upcroft, Tristan Perez, and Chris McCool. Deepfruits: A fruit detection system using deep neural networks. sensors, 16(8):1222, 2016. 1, 2, 8, 9, 13" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 243, + 553, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 243, + 553, + 308 + ], + "spans": [ + { + "bbox": [ + 316, + 243, + 553, + 308 + ], + "type": "text", + "content": "[61] Nickolay Safonov, Alexey Bryntsev, Andrey Moskalenko, Dmitry Kulikov, Dmitriy Vatolin, Radu Timofte, et al. Ntire 2025 challenge on UGC video enhancement: Methods and results. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 309, + 553, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 309, + 553, + 353 + ], + "spans": [ + { + "bbox": [ + 316, + 309, + 553, + 353 + ], + "type": "text", + "content": "[62] Alzayat Saleh, Issam H Laradji, Dmitry A Konovalov, Michael Bradley, David Vazquez, and Marcus Sheaves. A realistic fish-habitat dataset to evaluate algorithms for underwater visual analysis. Scientific Reports, 2020. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 354, + 553, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 354, + 553, + 398 + ], + "spans": [ + { + "bbox": [ + 316, + 354, + 553, + 398 + ], + "type": "text", + "content": "[63] Zeyu Shangguan and Mohammad Rostami. Identification of novel classes for improving few-shot object detection. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3356-3366, 2023. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 399, + 553, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 399, + 553, + 431 + ], + "spans": [ + { + "bbox": [ + 316, + 399, + 553, + 431 + ], + "type": "text", + "content": "[64] Zeyu Shangguan and Mohammad Rostami. Improved region proposal network for enhanced few-shot object detection. Neural Networks, 180:106699, 2024. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 434, + 553, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 434, + 553, + 487 + ], + "spans": [ + { + "bbox": [ + 316, + 434, + 553, + 487 + ], + "type": "text", + "content": "[65] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. 
In Proceedings of the IEEE/CVF international conference on computer vision, pages 8430-8439, 2019. 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 490, + 553, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 490, + 553, + 521 + ], + "spans": [ + { + "bbox": [ + 316, + 490, + 553, + 521 + ], + "type": "text", + "content": "[66] Jake Snell, Kevin Swersky, and Richard Zemel. Prototypical networks for few-shot learning. Advances in neural information processing systems, 30, 2017. 10" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 523, + 553, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 523, + 553, + 555 + ], + "spans": [ + { + "bbox": [ + 316, + 523, + 553, + 555 + ], + "type": "text", + "content": "[67] Kechen Song and Yunhui Yan. A noise robust method based on completed local binary patterns for hot-rolled steel strip surface defects. Applied Surface Science, 2013. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 558, + 553, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 558, + 553, + 590 + ], + "spans": [ + { + "bbox": [ + 316, + 558, + 553, + 590 + ], + "type": "text", + "content": "[68] Bo Sun, Banghuai Li, Shengcai Cai, Ye Yuan, and Chi Zhang. Fsce: Few-shot object detection via contrastive proposal encoding. In CVPR, 2021. 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 592, + 555, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 592, + 555, + 657 + ], + "spans": [ + { + "bbox": [ + 316, + 592, + 555, + 657 + ], + "type": "text", + "content": "[69] Lei Sun, Andrea Alfarano, Peiqi Duan, Shaolin Su, Kaiwei Wang, Boxin Shi, Radu Timofte, Danda Pani Paudel, Luc Van Gool, et al. Ntire 2025 challenge on event-based image deblurring: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 658, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 658, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 658, + 553, + 713 + ], + "type": "text", + "content": "[70] Lei Sun, Hang Guo, Bin Ren, Luc Van Gool, Radu Timofte, Yawei Li, et al. The tenth nitre 2025 image denoising challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 294, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 56, + 73, + 294, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 294, + 115 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 294, + 115 + ], + "type": "text", + "content": "[71] Hao Tang, Chengcheng Yuan, Zechao Li, and Jinhui Tang. Learning attention-guided pyramidal features for few-shot fine-grained recognition. Pattern Recognition, 130:108792, 2022. 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 294, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 294, + 162 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 294, + 162 + ], + "type": "text", + "content": "[72] Hung-Yu Tseng, Hsin-Ying Lee, Jia-Bin Huang, and Ming-Hsuan Yang. Cross-domain few-shot classification via learned feature-wise transformation. arXiv preprint arXiv:2001.08735, 2020. 
1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 164, + 294, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 164, + 294, + 218 + ], + "spans": [ + { + "bbox": [ + 56, + 164, + 294, + 218 + ], + "type": "text", + "content": "[73] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Cailian Chen, Zongwei Wu, Radu Timofte, et al. Ntire 2025 image shadow removal challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 220, + 294, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 220, + 294, + 274 + ], + "spans": [ + { + "bbox": [ + 56, + 220, + 294, + 274 + ], + "type": "text", + "content": "[74] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Zongwei Wu, Radu Timofte, et al. Ntire 2025 ambient lighting normalization challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 276, + 294, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 276, + 294, + 308 + ], + "spans": [ + { + "bbox": [ + 56, + 276, + 294, + 308 + ], + "type": "text", + "content": "[75] Xin Wang, Thomas E Huang, Trevor Darrell, Joseph E Gonzalez, and Fisher Yu. Frustratingly simple few-shot object detection. arXiv preprint arXiv:2003.06957, 2020. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 310, + 294, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 310, + 294, + 354 + ], + "spans": [ + { + "bbox": [ + 56, + 310, + 294, + 354 + ], + "type": "text", + "content": "[76] Xinkuang Wang, Wenjing Li, and Zhongcheng Wu. Cardd: A new dataset for vision-based car damage detection. IEEE Transactions on Intelligent Transportation Systems, 24(7): 7202-7214, 2023. 
1, 2, 9, 13" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 356, + 294, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 356, + 294, + 421 + ], + "spans": [ + { + "bbox": [ + 56, + 356, + 294, + 421 + ], + "type": "text", + "content": "[77] Yingqian Wang, Zhengyu Liang, Fengyuan Zhang, Lvli Tian, Longguang Wang, Juncheng Li, Jungang Yang, Radu Timofte, Yulan Guo, et al. Ntire 2025 challenge on light field image super-resolution: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 423, + 294, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 423, + 294, + 477 + ], + "spans": [ + { + "bbox": [ + 56, + 423, + 294, + 477 + ], + "type": "text", + "content": "[78] Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chau-mond, Clement Delangue, Anthony Moi, Pierrick Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, et al. Huggingface's transformers: State-of-the-art natural language processing. arXiv preprint arXiv:1910.03771, 2019. 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 479, + 294, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 479, + 294, + 521 + ], + "spans": [ + { + "bbox": [ + 56, + 479, + 294, + 521 + ], + "type": "text", + "content": "[79] Dongxian Wu, Shu-Tao Xia, and Yisen Wang. Adversarial weight perturbation helps robust generalization. Advances in neural information processing systems, 33:2958-2969, 2020. 9" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 524, + 294, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 524, + 294, + 567 + ], + "spans": [ + { + "bbox": [ + 56, + 524, + 294, + 567 + ], + "type": "text", + "content": "[80] Fuzhao Xue, Zian Zheng, Yao Fu, Jinjie Ni, Zangwei Zheng, Wangchunshu Zhou, and Yang You. 
Openmoe: An early effort on open mixture-of-experts language models. arXiv preprint arXiv:2402.01739, 2024. 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 570, + 294, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 570, + 294, + 645 + ], + "spans": [ + { + "bbox": [ + 56, + 570, + 294, + 645 + ], + "type": "text", + "content": "[81] Kangning Yang, Jie Cai, Ling Ouyang, Florin-Alexandru Vasluianu, Radu Timofte, Jiaming Ding, Huiming Sun, Lan Fu, Jinlong Li, Chiu Man Ho, Zibo Meng, et al. Ntire 2025 challenge on single image reflection removal in the wild: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 647, + 294, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 647, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 647, + 294, + 712 + ], + "type": "text", + "content": "[82] Pierluigi Zama Ramirez, Fabio Tosi, Luigi Di Stefano, Radu Timofte, Alex Costanzino, Matteo Poggi, Samuele Salti, Stefano Mattoccia, et al. Ntire 2025 challenge on hr depth from images of specular and transparent surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 329 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 316, + 73, + 553, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 553, + 126 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 553, + 126 + ], + "type": "text", + "content": "[83] Zican Zha, Hao Tang, Yunlian Sun, and Jinhui Tang. Boosting few-shot fine-grained recognition with background suppression and foreground alignment. 
IEEE Transactions on Circuits and Systems for Video Technology, 33(8):3947-3961, 2023. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 129, + 553, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 129, + 553, + 183 + ], + "spans": [ + { + "bbox": [ + 316, + 129, + 553, + 183 + ], + "type": "text", + "content": "[84] Ji Zhang, Jingkuan Song, Lianli Gao, and Hengtao Shen. Free-lunch for cross-domain few-shot learning: Style-aware episodic training with robust contrastive learning. In Proceedings of the 30th ACM international conference on multimedia, pages 2586-2594, 2022. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 185, + 553, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 185, + 553, + 217 + ], + "spans": [ + { + "bbox": [ + 316, + 185, + 553, + 217 + ], + "type": "text", + "content": "[85] Xinyu Zhang, Yuhan Liu, Yuting Wang, and Abdeslam Boularias. Detect everything with few examples. arXiv preprint arXiv:2309.12969, 2023. 1, 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 220, + 553, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 220, + 553, + 271 + ], + "spans": [ + { + "bbox": [ + 316, + 220, + 553, + 271 + ], + "type": "text", + "content": "[86] Linhai Zhuo, Yuqian Fu, Jingjing Chen, Yixin Cao, and YuGang Jiang. Tgdm: Target guided dynamic mixup for cross-domain few-shot learning. In Proceedings of the 30th ACM International Conference on Multimedia, pages 6368-6376, 2022. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 274, + 553, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 274, + 553, + 329 + ], + "spans": [ + { + "bbox": [ + 316, + 274, + 553, + 329 + ], + "type": "text", + "content": "[87] Linhai Zhuo, Yuqian Fu, Jingjing Chen, Yixin Cao, and YuGang Jiang. 
Unified view empirical study for large pretrained model on cross-domain few-shot learning. ACM Transactions on Multimedia Computing, Communications and Applications, 20(9):1-18, 2024. 1" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10686/02e14e26-d981-43b7-bd68-0bb6d5c44d72_content_list.json b/data/2025/2504_10xxx/2504.10686/02e14e26-d981-43b7-bd68-0bb6d5c44d72_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..2275fed22946af220e6d8a28f5bf16a23f813543 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/02e14e26-d981-43b7-bd68-0bb6d5c44d72_content_list.json @@ -0,0 +1,9700 @@ +[ + { + "type": "text", + "text": "The Tenth NTIRE 2025 Efficient Super-Resolution Challenge Report", + "text_level": 1, + "bbox": [ + 151, + 130, + 843, + 151 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/9b50cbffaa73a484b07fe9e673836900b5b744821aa2f3a6b5870cf1b1837401.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Bin Ren*Hang Guo*Lei Sun*Zongwei Wu*Radu Timofte*Yawei Li*
Yao ZhangXinning ChaiZhengxue ChengYingsheng QinYucai Yang
Li SongHongyuan YuPufan XuCheng WanZhijuan HuangPeng Guo
Shuyuan CuiChenjun LiXuehai HuPan PanXin ZhangHeng Zhang
Qing LuoLinyan JiangHaibo LeiQifang GaoYaqing LiWeihua Luo
Tsing LiQing WangYi LiuYang WangHongyu AnLiou Zhang
Shijie ZhaoLianhong SongLong SunJinshan PanJiangxin DongJinhui Tang
Jing WeiMengyang WangRuilong GuoQian WangQingliang Liu
Yang ChengDavinciEnxuan GuPinxin LiuYongsheng YuHang Hua
Yunlong TangShihao WangYukun YangZhiyu ZhangYukun YangJiyu Wu
Jiancheng HuangYifan LiuYi HuangShifeng ChenRui ChenYi Feng
Mingxi LiCailu WanXiangji WuZibin LiuJinyang ZhongKihwan Yoon
Ganzorig GankhuyagShengyun ZhongMingyang WuRenjie LiYushen Zuo
Zhengzhong TuZongang GaoGuannan ChenYuan TianWenhui Chen
Weijun YuanZhan LiYihang ChenYifan DengRuting DengYilin Zhang
Huan ZhengYanyan WeiWenxuan ZhaoSuiyi ZhaoFei WangKun Li
Yinggan TangMengjie SuJae-hyeon LeeDong-Hyeop SonUi-Jin Choi
Tiancheng ShaoYuqing ZhangMengcheng MaDonggeun KoYoungsang Kwak
Jiun LeeJaehwa KwakYuxuan JiangQiang ZhuSiyue TengFan Zhang
Shuyuan ZhuBing ZengDavid BullJing HuHui DengXuan Zhang
Lin ZhuQinrui FanWeijian DengJunnan WuWenqin DengYuquan Liu
Zhaohong XuJameer Babu PinjariKuldeep PurohitZeyu XiaoZhuoyuan Li
Surya VashisthAkshay DudhanePraful HambardeSachin Chaudhary
Satya Naryan TaziPrashant PatilSantosh Kumar VipparthiSubrahmanyam Murala
Wei-Chen ShenI-Hsiang ChenYunzhe XuChen ZhaoZhizhou Chen
Akram Khatami-RiziAhmad Mahmoudi-AznavehAlejandro MerinoBruno Longarela
Javier AbadMarcos V. CondeSimone BiancoLuca CogoGianmarco Corti
", + "bbox": [ + 102, + 179, + 890, + 657 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 691, + 325, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper presents a comprehensive review of the NTIRE 2025 Challenge on Single-Image Efficient Super-Resolution", + "bbox": [ + 89, + 723, + 480, + 753 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "(ESR). The challenge aimed to advance the development of deep models that optimize key computational metrics, i.e., runtime, parameters, and FLOPs, while achieving a PSNR of at least 26.90 dB on the DIV2K_LSDIR_valid dataset and 26.99 dB on the DIV2K_LSDIR_test dataset. A robust participation saw 244 registered entrants, with 43 teams submitting valid entries. This report meticulously analyzes these methods and results, emphasizing groundbreaking advancements in state-of-the-art single-image ESR techniques. The analysis highlights innovative approaches and establishes benchmarks for future research in the field.", + "bbox": [ + 511, + 691, + 906, + 859 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10686v1 [cs.CV] 14 Apr 2025", + "bbox": [ + 22, + 262, + 60, + 704 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* B. Ren (bin.ren@unitn.it, University of Pisa & University of Trento, Italy), H. Guo (cshguo@gmail.com, Tsinghua University), L. Sun (lei.sun@insait.ai,INSAIT, Sofia University\"St. Kliment Ohridski\"), Z. Wu (zongwei.wu@uni-wuerzburg.de, University of Würzburg, Germany), R. Timofte (Radu.Timofte@uni-wuerzburg.de, University of Würzburg, Germany), and Y. 
Li (yawei.li@vision.ee.ethz.ch, ETH Zürich, Switzerland) were the challenge organizers, while the other authors participated in the challenge.", + "bbox": [ + 89, + 767, + 482, + 864 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Appendix A contains the authors' teams and affiliations.", + "bbox": [ + 91, + 864, + 388, + 875 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "NTIRE 2025 webpage: https://cvslai.net/ntire/2025/.", + "bbox": [ + 91, + 876, + 457, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Code: https://github.com/Amazingren/NTIRE2025_ESR/.", + "bbox": [ + 91, + 888, + 477, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 925, + 501, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 91, + 89, + 222, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Single image super-resolution (SR) is designed to reconstruct a high-resolution (HR) image from a single low-resolution (LR) image, typically affected by blurring and down-sampling. The standard degradation model in traditional SR, bicubic down-sampling, allows for consistent benchmarks and systematic comparisons among different SR methods. This framework also serves as a platform to highlight the advances in SR technologies. SR techniques are widely used in fields such as satellite imaging, medical image enhancement, and surveillance, where improved image quality is essential for accurate interpretation and analysis.", + "bbox": [ + 89, + 114, + 480, + 296 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "State-of-the-art deep neural networks for image superresolution (SR) often suffer from overparameterization, intensive computation, and high latency, making their deployment on mobile devices for real-time SR applications challenging. 
To address these limitations, extensive research has focused on improving network efficiency through techniques such as network pruning, low-rank filter decomposition, network quantization, neural architecture search, state space modeling, diffusion priors, and knowledge distillation [76, 79, 89, 90, 129, 143, 146, 148]. These compression methods, successfully applied to image SR, optimize both the computational footprint and the operational speed [8, 91, 123].", + "bbox": [ + 89, + 296, + 482, + 492 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Efficient SR is particularly crucial for edge computing and mobile devices, where processing power, energy availability, and memory are limited. The enhanced efficiency of SR models ensures that these devices can execute high-quality image processing in real-time without exhausting system resources or draining battery life rapidly. Metrics like runtime, parameter count, and computational complexity (FLOPs) are vital for assessing the suitability of SR models for edge deployment. These parameters are key in maintaining a balance between performance and resource use, ensuring that mobile devices can deliver advanced imaging capabilities efficiently. This balance is critical for the widespread adoption of advanced SR techniques in everyday applications, driving the development of AI-enabled technologies that are both powerful and accessible.", + "bbox": [ + 89, + 492, + 482, + 718 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In collaboration with the 2025 New Trends in Image Restoration and Enhancement (NTIRE 2025) workshop, we organize the challenge on single-image efficient superresolution. 
The challenge's goal is to super-resolve an LR image with a magnification factor of $\\times 4$ using a network that reduces aspects such as runtime, parameters, FLOPs, of EFDN [116], while at least maintaining the $26.90~\\mathrm{dB}$ on the DIV2K_LSDIR_valid dataset, and $26.99\\mathrm{dB}$ on the DIV2K_LSDIR_test dataset. This challenge aims to discover advanced and innovative solutions for efficient SR, benchmark their efficiency, and identify general trends for the design of future efficient SR networks.", + "bbox": [ + 89, + 719, + 482, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This challenge is one of the NTIRE 2025 Workshop associated challenges on: ambient lighting normalization [106], reflection removal in the wild [125], shadow removal [105], event-based image deblurring [97], image denoising [98], XGC quality assessment [74], UGC video enhancement [93], night photography rendering [28], image super-resolution (x4) [12], real-world face restoration [13], efficient super-resolution [92], HR depth estimation [130], efficient burst HDR and restoration [58], cross-domain few-shot object detection [29], short-form UGC video quality assessment and enhancement [62, 63], text to image generation model quality assessment [36], day and night rain-drop removal for dual-focused images [61], video quality assessment for video conferencing [47], low light image enhancement [75], light field super-resolution [121], restore any image model (RAIM) in the wild [68], raw restoration and super-resolution [16] and raw reconstruction from RGB on smartphones [17].", + "bbox": [ + 511, + 90, + 903, + 363 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. 
NTIRE 2025 Efficient Super-Resolution Challenge", + "text_level": 1, + "bbox": [ + 511, + 388, + 906, + 425 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The goals of this challenge include: (i) promoting research in the area of single-imae efficient super-resolution, (ii) facilitating comparisons between the efficiency of various methods, and (iii) providing a platform for academic and industrial participants to engage, discuss, and potentially establish collaborations. This section delves into the specifics of the challenge.", + "bbox": [ + 511, + 435, + 906, + 542 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Dataset", + "text_level": 1, + "bbox": [ + 511, + 563, + 609, + 578 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The DIV2K [4] dataset and LSDIR [64] dataset are utilized for this challenge. The DIV2K dataset consists of 1,000 diverse 2K resolution RGB images, which are split into a training set of 800 images, a validation set of 100 images, and a test set of 100 images. The LSDIR dataset contains 86,991 high-resolution high-quality images, which are split into a training set of 84,991 images, a validation set of 1,000 images, and a test set of 1,000 images. In this challenge, the corresponding LR DIV2K images are generated by bicubic downsampling with a down-scaling factor of $4 \\times$ . The training images from DIV2K and LSDIR are provided to the participants of the challenge. During the validation phase, 100 images from the DIV2K validation set and 100 images from the LSDIR validation set are made available to participants. During the test phase, 100 images from the DIV2K test set and another 100 images from the LSDIR test set are used. 
Throughout the entire challenge, the testing HR images remain hidden from the participants.", + "bbox": [ + 511, + 589, + 903, + 862 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "https://www.cvlai.net/ntire/2025/", + "bbox": [ + 537, + 887, + 799, + 898 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. EFDN Baseline Model", + "text_level": 1, + "bbox": [ + 89, + 90, + 299, + 104 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The Edge-Enhanced Feature Distillation Network (EFDN) [116] serves as the baseline model in this challenge. The aim is to improve its efficiency in terms of runtime, number of parameters, and FLOPs, while at least maintaining 26.90 dB on the DIV2K_LSDIR_valid dataset and 26.99 dB on the DIV2K_LSDIR_test dataset.", + "bbox": [ + 89, + 113, + 483, + 204 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The main idea within EFDN is a combination of block composing, architecture searching, and loss designing to obtain a trade-off between performance and lightweighting. Especially, For block composing, EFDN sum up the re-parameterization methods [20, 21, 138] and designs a more effective and complex edge-enhanced diverse branch block. In detail, they employ several reasonable reparameterizable branches to enhance the structural information extraction, and then they integrate them into a vanilla convolution to maintain the inference performance. To ensure the effective optimization of parallel branches in EDBB, they designed an edge-enhanced gradient-variance loss (EG) based on the gradient-variance loss [1]. The proposed loss enforces minimizing the difference between the computed variance maps, which is helpful to restore sharper edges. The gradient maps calculated by different filters and the corresponding EG loss. 
In addition, the NAS strategy of DLSR is adopted to search for a robust backbone.", + "bbox": [ + 89, + 205, + 483, + 474 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The baseline EFDN emerges as the 1st place for the overall performance of the NTIRE2023 Efficient SR Challenge [116]. The quantitative performance and efficiency metrics of EFDN are summarized as follows: (1) The number of parameters is $0.276\\mathrm{M}$ . (2) The average PSNRs on validation (DIV2K 100 valid images and LSDIR 100 valid images) and testing (DIV2K 100 test images and LSDIR 100 test images) sets of this challenge are 26.93 dB and 27.01 dB, respectively. (3) The runtime averaged to 22.18ms on the validation and test set with PyTorch $2.0.0 + \\mathrm{cu}118$ , and a single NVIDIA RTX A6000 GPU. (4) The number of FLOPs for an input of size $256\\times 256$ is $16.70\\mathrm{G}$ .", + "bbox": [ + 89, + 478, + 483, + 672 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Tracks and Competition", + "text_level": 1, + "bbox": [ + 89, + 686, + 313, + 702 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The aim of this challenge is to devise a network that reduces one or several aspects such as runtime, parameters, and FLOPs, while at least maintaining the 26.90 dB on the DIV2K_LSDIR valid dataset, and 26.99 dB on the DIV2K_LSDIR test dataset.", + "bbox": [ + 89, + 709, + 482, + 784 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Challenge phases: (1) Development and validation phase: Participants were given access to 800 LR/HR training image pairs and 200 LR/HR validation image pairs from the DIV2K and the LSDIR datasets. An additional 84,991 LR/HR training image pairs from the LSDIR dataset are also provided to the participants. 
The EFDN model, pretrained parameters, and validation demo script are available", + "bbox": [ + 89, + 794, + 483, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "on GitHub https://github.com/Amazingren/NTIRE2025_ESR, allowing participants to benchmark their models' runtime on their systems. Participants could upload their HR validation results to the evaluation server to calculate the PSNR of the super-resolved image produced by their models and receive immediate feedback. The corresponding number of parameters, FLOPs, and runtime will be computed by the participants. (2) Testing phase: In the final test phase, participants were granted access to 100 LR testing images from DIV2K and 100 LR testing images from LSDIR, while the HR ground-truth images remained hidden. Participants submitted their super-resolved results to the Codalab evaluation server and emailed the code and factsheet to the organizers. The organizers verified and ran the provided code to obtain the final results, which were then shared with participants at the end of the challenge.", + "bbox": [ + 511, + 90, + 906, + 333 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Evaluation protocol: Quantitative evaluation metrics included validation and testing PSNRs, runtime, FLOPs, and the number of parameters during inference. PSNR was measured by discarding a 4-pixel boundary around the images. The average runtime during inference was computed on the 200 LR validation images and the 200 LR testing images. The average runtime on the validation and testing sets served as the final runtime indicator. FLOPs are evaluated on an input image of size $256 \\times 256$ . Among these metrics, runtime was considered the most important. Participants were required to maintain a PSNR of at least 26.90 dB on the DIV2K_LSDIR valid dataset, and 26.99 dB on the DIV2K_LSDIR test dataset during the challenge. The constraint on the testing set helped prevent overfitting on the validation set. 
It's important to highlight that methods with a PSNR below the specified threshold (i.e., 26.90 dB on DIV2K_LSDIR_valid and, 26.99 dB on DIV2K_LSDIR_test) will not be considered for the subsequent ranking process. It is essential to meet the minimum PSNR requirement to be eligible for further evaluation and ranking. A code example for calculating these metrics is available at https://github.com/Amazingren/NTIRE2025_ESR.", + "bbox": [ + 511, + 349, + 908, + 698 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To better quantify the rankings, we followed the scoring function from NTIRE2024 ESR [91] for three evaluation metrics in this challenge: runtime, FLOPs, and parameters. This scoring aims to convert the performance of each metric into corresponding scores to make the rankings more significant. Especially, the score for each separate metric (i.e., Runtime, FLOPs, and parameter) for each sub-track is calculated as:", + "bbox": [ + 511, + 700, + 908, + 821 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\text {S c o r e} _ {\\text {M e t r i c}} = \\frac {\\operatorname {E x p} (2 \\times \\operatorname {M e t r i c} _ {\\text {T e a m X}})}{\\operatorname {M e t r i c} _ {\\text {B a s e l i n e}}}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 558, + 837, + 906, + 869 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "based on the score of each metric, the final score used for", + "bbox": [ + 511, + 885, + 906, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the main track is calculated as:", + "bbox": [ + 89, + 90, + 295, + 104 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\text {S c o r e} = w _ {1} \\times \\text {S c o r e} \\\\ + w _ {2} \\times S c o r e \\_ F L O P s \\tag {2} \\\\ + w _ {3} \\times S c o r e \\_ P a r a m s, \\\\ 
\\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 153, + 109, + 480, + 162 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $w_{1}, w_{2}$ , and $w_{3}$ are set to 0.7, 0.15, and 0.15, respectively. This setting is intended to incentivize participants to design a method that prioritizes speed efficiency while maintaining a reasonable model complexity.", + "bbox": [ + 89, + 169, + 482, + 231 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. Challenge Results", + "text_level": 1, + "bbox": [ + 89, + 241, + 264, + 258 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The final challenge results and the corresponding rankings are presented in Tab. 1 The table also includes the baseline method EFDN [116] for comparison. In Sec.4, the methods evaluated in Tab. 1 are briefly explained, while the team members are listed in A. The performance of different methods is compared from four different perspectives, including the runtime, FLOPs, the parameters, and the overall performance. Furthermore, in order to promote a fair competition emphasizing efficiency, the criteria for image reconstruction quality in terms of test PSNR are set to 26.90 and 26.99 on the DIV2K_LSDIR_valid and DIV2K_LSDIR_test sets, respectively.", + "bbox": [ + 89, + 266, + 482, + 446 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Runtime. In this challenge, runtime stands as the paramount evaluation metric. ShannonLab's solution emerges as the frontrunner with the shortest runtime among all entries in the efficient SR challenge, securing its top-3 ranking position. Following closely, the TSSR and mbga claim the second and third spots, respectively. Remarkably, the average runtime of the top three solutions on both the validation and test sets remains below $10\\mathrm{ms}$ . 
Impressively, the first 13 teams present solutions with an average runtime below $16\\mathrm{ms}$ , showcasing a continuous enhancement in the efficiency of image SR networks. Despite the slight differences in runtime among the top three teams, the challenge retains its competitive edge. An additional distinction from previous challenges worth noting is that this year, runtime performance no longer predominantly dictates the overall rankings as it has in the past, where the top three solutions in terms of runtime were also the top performers in the main track (e.g., from NTIRE ESR 2024 [91]). This shift indicates that participants are now emphasizing a more balanced approach, focusing not only on runtime optimization but also on improving the comprehensive performance of their models", + "bbox": [ + 89, + 448, + 482, + 779 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Parameters. Model complexity was further evaluated by considering the number of parameters, as detailed in Table 1. In this sub-track, VEPG_C achieved the top position with only 0.044M parameters, closely followed by HannahSR and XUPTBoys with 0.060M and 0.072M parameters, respectively. The minimal disparity among the top three methods highlights their competitive edge and efficiency in managing model complexity. They were scored", + "bbox": [ + 89, + 780, + 482, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "at 1.38, 1.54, and 1.68, respectively, indicating a tight competition. However, it is noteworthy that these models also exhibited relatively high runtimes, suggesting an area for potential improvement in future iterations.", + "bbox": [ + 511, + 90, + 903, + 151 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "FLOPs. The number of floating-point operations (FLOPs) is another critical metric for assessing model complexity. 
Within this sub-track, VEPG_C, XUPTBoys, and HannahSR secured the top three positions with FLOPs of 3.13G, 3.39G, and 3.75G, respectively. The competitiveness of this sub-track is further confirmed by the close scores of 1.45, 1.50, and 1.57, aligned with the parameter evaluation results. Remarkably, the same models top both the parameters and FLOPs evaluations, demonstrating consistent performance across different complexity metrics. Similar to the parameters sub-track, the extended runtimes of these methods point to a need for further research and optimization. Key implications include: i) Efficiency vs. Performance Trade-off: The close competition among the top models in terms of parameters and FLOPs suggests a significant trade-off between model efficiency and performance. Despite achieving minimal parameter counts and FLOPs, the high runtimes indicate that these models might be optimizing computational complexity at the expense of execution speed. This raises important considerations for future research in balancing efficiency with real-world usability, especially in applications where inference speed is critical. ii) Potential for Model Optimization: The consistency in ranking between the parameters and FLOPs sub-tracks reveals that models which are optimized for one aspect of computational efficiency tend to perform well in others. However, the noted high runtimes across these models suggest an untapped potential for holistic model optimization. Future work could focus on integrating more advanced optimization techniques or exploring novel architectural innovations to enhance both the computational efficiency and runtime performance.", + "bbox": [ + 511, + 152, + 906, + 638 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Overall Evaluation. The final assessment of performance employs a comprehensive metric that synthesizes runtime, FLOPs, and the number of parameters into a unified score. 
In this rigorous evaluation, the EMSR Group excelled, claiming the prestigious top position, followed by XiaomiMM (the winner of the NTIRE ESR 2024 challenge) and ShannonLab in second and third places, respectively. This achievement highlights the sophisticated engineering and innovative approaches implemented by these groups.", + "bbox": [ + 511, + 641, + 905, + 777 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Contrasting with the previous year, where runtime heavily influenced overall rankings, this year presents a shift. The best performer in runtime only secured third place in the overall competition. Specifically, EMSR, the overall winner, ranked fifth in runtime, sixth in parameters, and seventh in FLOPs. Similarly, XiaomiMM, which came second overall, was fourth in runtime, eleventh in parameters, and thirteenth in FLOPs. This demonstrates that: i) A balanced", + "bbox": [ + 511, + 780, + 908, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1. Results of Ninth NTIRE 2025 Efficient SR Challenge. The performance of the solutions is compared thoroughly from three perspectives including the runtime, FLOPs, and the number of parameters. The underscript numbers associated with each metric score denote the ranking of the solution in terms of that metric. For runtime, “Val.” is the runtime averaged on DIV2K_LSDIR_valid validation set. “Test” is the runtime averaged on a test set with 200 images from DIV2K_LSDIR_test set, respectively. “Ave.” is averaged on the validation and test datasets. “#Params” is the total number of parameters of a model. “FLOPs” denotes the floating point operations. Main Track combines all three evaluation metrics. The ranking for the main track is based on the score calculated via Eq. 2, and the ranking for other sub-tracks is based on the score of each metric via Eq. 1. 
Please note that this is not a challenge for PSNR improvement. The “validation/testing PSNR” is not ranked. For all the scores, the lower, the better.", + "bbox": [ + 89, + 88, + 906, + 200 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/7d49cf2eb698bccefc0d40589b7e4973c91e663cd6c23a2111a0cbded9d52ffc.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TeamsPSNR [dB]Runtime [ms]#Params [M]FLOPs [G]Sub-Track ScoresMain-Track
Val.TestVal.TestAve.Runtime#ParamsFLOPsOverall ScoreRanking
EMSR26.9226.9910.2689.7209.9940.1318.542.46(5)2.58(6)2.78(7)2.531
XiaomiMM26.9227.009.9589.1329.5450.1489.682.36(4)2.92(11)3.19(13)2.572
ShannonLab26.9027.008.9388.3028.6200.17211.232.18(1)3.48(17)3.84(18)2.623
TSSR26.9027.029.8128.8989.3550.16410.692.32(2)3.28(15)3.60(16)2.664
Davinci26.9227.0011.4269.87610.6510.1469.552.61(6)2.88(9)3.14(11)2.735
SRCB26.9227.0011.4129.96010.6860.1469.552.62(7)2.88(9)3.14(11)2.746
Rochester26.9427.0111.93410.45411.1940.15810.302.74(8)3.14(14)3.43(14)2.917
mbga26.9027.009.8229.2089.5150.19212.562.36(3)4.02(19)4.50(20)2.938
IESR26.9026.9913.76012.58213.1710.1438.323.28(10)2.82(7)2.71(6)3.129
ASR26.9027.0013.86411.98412.9240.1549.063.21(9)3.05(12)2.96(8)3.1510
VPEG_O26.9026.9916.35613.92615.1410.1459.423.92(12)2.86(8)3.09(9)3.6311
mmSR26.9527.0514.45012.03613.2430.21213.853.30(11)4.65(21)5.25(23)3.8012
ChanSR26.9227.0316.73815.59216.1650.21011.594.29(16)4.58(20)4.01(19)4.2913
Pixel Alchemists26.9027.0117.32214.60815.9650.21312.934.22(14)4.68(22)4.70(21)4.3614
MiSR26.9027.0217.05614.98816.0220.21313.864.24(15)4.68(22)5.26(24)4.4615
LZ26.9027.0116.98015.45016.2150.25216.424.31(17)6.21(25)7.15(25)5.0216
Z626.9026.9920.36216.18418.2730.30318.705.19(20)8.99(27)9.39(27)6.3917
TACO_SR26.9427.0517.82815.65216.7400.34220.034.52(18)11.92(30)11.01(30)6.6118
AIOT_AI26.9027.0019.83618.15818.9970.30119.565.54(21)8.86(26)10.41(28)6.7719
JNU62026.9027.0120.68818.28219.4850.32520.315.79(22)10.54(29)11.39(31)7.3420
LVGroup_HFUT26.9627.0716.39414.87615.6350.42627.874.09(13)21.91(33)28.15(34)10.3821
SVM26.9227.0430.61028.13429.3720.25113.3914.13(23)6.16(24)4.97(22)11.5622
YG26.9227.0433.65831.61432.6360.0935.8218.96(24)1.96(5)2.01(5)13.8723
NanoSR26.9727.0817.93016.30017.1150.55136.024.68(19)54.20(35)74.72(35)22.6124
MegastudyEdu Vision AI27.0127.1339.37637.52838.4520.16910.6332.03(25)3.40(16)3.57(15)23.4725
XUPTBoys26.9127.0350.56435.01242.7880.0723.3947.36(26)1.68(3)1.50(2)33.6326
MILA26.9027.0244.36242.03443.1980.0874.9349.14(27)1.88(4)1.80(4)34.9527
AiMF_SR26.9827.1046.59443.09244.8430.1809.4857.00(28)3.69(18)3.11(10)40.9228
EagleSR27.0427.1647.73045.19246.4610.35221.8965.95(29)12.82(31)13.76(32)50.1529
BVIVSR26.9726.9949.48846.79848.1430.15510.7976.75(30)3.07(13)3.64(17)54.7330
HannahSR26.9027.0258.28641.42249.8540.0603.7589.55(31)1.54(2)1.57(3)63.1531
VPEG_C26.9027.0060.04640.95050.4980.0443.1394.90(32)1.38(1)1.45(1)66.8632
CUIT_HT27.0927.2062.03859.10660.5720.30919.75235.36(33)9.39(28)10.65(29)167.7633
GXZY AI27.0127.13102.92499.102101.0130.42825.889.02e3(34)22.23(34)22.18(33)6.32e334
SCMSR26.9227.00133.866114.088123.9770.39317.627.15e4(35)17.25(32)8.25(26)5.01e435
IPCV27.2727.40366.924357.268362.0960.86665.661.51e14(36)531.32(37)2.60e3(36)1.05e1436
X-L27.0727.21525.966479.346502.6560.96670.834.81e19(37)1.10e3(38)4.83e3(37)3.36e1937
Quantum Res27.2927.40574.632558.934566.7830.79076.091.56e22(38)306.32(36)9.07e3(38)1.09e2238
The following methods are not ranked since their validation/testing PSNR (underlined) is not on par with the threshold.
SylabSR24.3624.4628.58024.82626.7030.0727.9011.111.682.588.41-
NJUPCA26.7026.8070.20252.93261.5672.30830.11257.451.83e736.822.75e6-
DepthIBN26.5626.6639.15436.87638.0150.1217.7130.802.402.5222.30-
Cidaut AI26.8626.9527.22024.97426.0970.21012.8310.524.584.658.75-
IVL26.6626.7618.74616.94417.8450.24015.645.005.696.515.33-
Baseline26.9327.0123.91220.45422.1830.27616.77.397.397.397.39-
", + "bbox": [ + 91, + 212, + 906, + 640 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "approach to model design, optimizing across multiple metrics rather than focusing on a single aspect, is becoming crucial in competitive evaluations. ii) Achieving top performance in one metric does not guarantee similar success in overall rankings, underscoring the complexity of model optimization in real-world scenarios. This year's goal was to encourage a balanced pursuit of speed and efficiency, a challenge that has evidently led to significant innovations and advancements in model design.", + "bbox": [ + 89, + 666, + 480, + 801 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "PSNR. Team Quantum Res, IPCV, X-L, and CUIT_HTT demonstrate superior PSNR values, a critical evaluation metric in super-resolution. Specifically, Quantum Res and IPCV lead with an exceptional 27.40 dB, closely followed by X-L with 27.21 dB, and CUIT_HTT at 27.20 dB on the DIV2K_LSDIR_test set. Despite these impressive perfor", + "bbox": [ + 89, + 809, + 482, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "mances, it is essential to emphasize that the primary focus of this challenge is on efficiency in super-resolution. Accordingly, we have adjusted the PSNR criteria, setting rigorous lower thresholds of 26.90 dB and 26.99 dB for the DIV2K_LSDIR_valid and DIV2K_LSDIR_test sets, respectively. This adjustment is designed to prioritize a balance between high performance and computational efficiency. A commendable total of 38 teams met this adjusted benchmark, demonstrating their capability to effectively balance image quality with efficiency. However, teams like IVL, Cidaut AI, SylabSR DepthIB, and NJUPCA, while notable for their efficiency, did not achieve the required PSNR levels. 
This highlights the ongoing challenge of optimizing super-resolution processes that meet both efficiency and performance standards, underscoring the complex nature of", + "bbox": [ + 511, + 666, + 906, + 893 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "advancements in this field.", + "bbox": [ + 91, + 90, + 269, + 104 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1. Main Ideas", + "text_level": 1, + "bbox": [ + 89, + 133, + 215, + 148 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Throughout this challenge, several techniques have been proposed to enhance the efficiency of deep neural networks for image super-resolution (SR) while striving to maintain optimal performance. The choice of techniques largely depends on the specific metrics that a team aims to optimize. Below, we outline some typical ideas that have emerged:", + "bbox": [ + 89, + 161, + 482, + 253 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Distillation is an effective manner to maintain the PSNR performance without increasing computation cost during inference. The team EMSR added only the ConvLora-Like [7] operation into the base model. Similarly, team ESPAN also proposed to use the self-distillation for progressive learning strategy validated from [42].", + "- Re-parameterization [22] [24, 126] is commonly used in this challenge. Usually, a normal convolutional layer with multiple basic operations ( $3 \\times 3$ convolution, $1 \\times 1$ operation, first and second-order derivative operators, skip connections) is parameterized during training. During inference, the multiple operations that reparameterize a convolution could be merged back into a single convolution. 
e.g., Some top teams (i.e., XiaomiMM, mmSR, HannahSR, etc) used this operation in their methods.", + "- Parameter-free attention mechanism is validated as a useful technique to enhance computational efficiency [24, 126]. Specifically, XiaomiMM proposed a swift parameter-free attention network based on parameter-free attention, which achieves the lowest runtime while maintaining a decent PSNR performance.", + "- Incorporating multi-scale information and hierarchical module design are proven strategies for effectively fusing critical information. For instance, solutions such as HannahSR, XuPTBoys, and ChanSR have successfully utilized multi-scale residual connections and hierarchical module designs to enhance their performance.", + "- Network pruning plays an important role. It is observed that a couple of teams (i.e., ASR, Davinci) used network pruning techniques to slightly compress a network. This leads to a more lightweight architecture without a heavy performance drop.", + "- Exploration with new network architectures is conducted. Besides the common CNN or Transformers, the state space model (i.e., vision mamba [30, 32]) was tried by GXZY.AI in this challenge, which was also validated in the last NTIRE ESR challenge [91].", + "- Various other techniques are also attempted. Some teams also proposed solutions based on neural architecture search, vision transformers, frequency processing, multi-stage design, and advanced training strategies." + ], + "bbox": [ + 89, + 266, + 482, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2. Fairness", + "text_level": 1, + "bbox": [ + 513, + 90, + 614, + 104 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To ensure the integrity and fairness of the Efficient SR Challenge, we meticulously established a set of rules focusing on the permissible datasets for training the models. 
Participants were allowed to augment their training with external datasets, such as Flickr2K, to promote diverse and comprehensive model training experiences. However, to guarantee an unbiased evaluation, the use of additional DIV2K and LSDIR validation sets, which include both high-resolution (HR) and low-resolution (LR) images, was explicitly prohibited during the training phase. This restriction aimed to maintain the validation set's integrity as a vital benchmark for assessing the proposed networks' performance and generalizability. Moreover, using LR images from the DIV2K and LSDIR test sets for training was strictly forbidden, ensuring the test dataset's purity and upholding the evaluation process's integrity. Lastly, the adoption of advanced data augmentation techniques during training was encouraged as a fair practice, allowing participants to enhance their models within the defined rules and guidelines.", + "bbox": [ + 511, + 112, + 906, + 400 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3. Conclusions", + "text_level": 1, + "bbox": [ + 511, + 409, + 643, + 422 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The analysis of the submissions to this year's Efficient SR Challenge allows us to draw several important conclusions:", + "bbox": [ + 511, + 431, + 905, + 460 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Firstly, the competition within the image super-resolution (SR) community remains intense. This year, the challenge attracted 244 registered participants, with 43 teams making valid submissions. All proposed methods have enhanced the state-of-the-art in efficient SR. Notably, the competition among the top three teams has intensified, with last year's winner ranking second this year.", + "- Secondly, unlike in previous challenges, dominance in runtime no longer characterizes the top-ranking teams. 
Instead, more balanced solutions that consider all aspects of performance are proving to be more beneficial.", + "- Thirdly, consistent with the success of deep learning techniques like DeepSeek, the distillation approach has significantly contributed to performance improvements without adding computational complexity.", + "- Fourthly, re-parameterization and network compression have emerged as crucial techniques in enhancing efficiency in SR. Ongoing exploration in these areas is encouraged to further boost efficiency.", + "- Fifthly, the use of large-scale datasets, such as the one described in [64], for pre-training has been shown to enhance accuracy significantly. Typically, training incorporates multiple phases, gradually increasing the patch size and decreasing the learning rate, optimizing the training process.", + "- Sixthly, this year's challenge saw the introduction of the state space model, presenting a novel approach that may influence future research directions in the field." + ], + "bbox": [ + 511, + 462, + 905, + 883 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Overall, by considering factors like runtime, FLOPs,", + "bbox": [ + 531, + 885, + 903, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 504, + 936 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "and parameter count simultaneously, it is feasible to design models that optimize across multiple evaluation metrics. Finally, as computational capabilities continue to evolve, the focus on optimizing models for runtime, FLOPs, and parameter efficiency becomes increasingly vital. With advancements in both hardware and software, we expect the development of more sophisticated and efficient models in the super-resolution domain. 
The pursuit of efficiency in SR is likely to remain a key driver of innovation, promising exciting advancements and continual progress in the field.", + "bbox": [ + 89, + 90, + 483, + 243 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4. Challenge Methods and Teams", + "text_level": 1, + "bbox": [ + 89, + 258, + 372, + 276 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1. EMSR", + "text_level": 1, + "bbox": [ + 89, + 285, + 179, + 299 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Method. The overall architecture of the team EMSR is shown in Fig. 1, which is based on the leading efficient super-resolution method SPAN [112]. Inspired by ConvLora [7], the team proposes SconvLB, which incorporates ConvLora into SPAB to improve performance without increasing computation complexity. Specifically, given a pre-trained convolutional layer in SPAB, they update it by adding Lora layers, and representing it with a low-rank decomposition:", + "bbox": [ + 89, + 306, + 483, + 444 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nW _ {\\text {C o n v L o r a}} = W _ {P T} + X Y, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 458, + 482, + 474 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $W_{ConvLora}$ denotes the updated weight parameters of the convolution, $W_{PT}$ denotes the original pre-trained parameters of the convolution, $X$ is initialized by random Gaussian distribution, and $Y$ is zero in the beginning of training. Note that the Lora weights can be merged into the main backbone. Therefore, ConvLoras don't introduce extra computation during inference.", + "bbox": [ + 89, + 488, + 483, + 594 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "They adopt the pre-trained SPAN-Tiny model [112] with 26 channels. They replace the SPAB in SPAN with our proposed SconvLB, and also add ConvLora into the pixel shuffle block and the convolution before it. 
During training, they freeze the original weight and bias of the convolution and only update the Lora parameters.", + "bbox": [ + 89, + 595, + 483, + 686 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Optimization. To supervise the optimization of SconvLB, they adopt a knowledge-based distillation training strategy. They adopt spatial affinity-based knowledge distillation [37] to transfer second-order statistical info from the teacher model to the student model by aligning spatial feature affinity matrices at multiple layers of the networks. Given a feature $F_{l} \\in R^{B \\times C \\times W \\times H}$ extracted from the $l$ -th layer of the network, they first flatten the tensor along the last two dimensions and calculate the affinity matrix $A_{\\text{spatial}}$ . Then the spatial feature affinity-based distillation loss can be formulated as:", + "bbox": [ + 89, + 686, + 483, + 852 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nL _ {A D} = \\frac {1}{| A |} \\sum_ {l = 1} ^ {n} \\left\\| A _ {l} ^ {S} - A _ {l} ^ {T} \\right\\| _ {1}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 864, + 482, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $A_{l}^{S}$ and $A_{l}^{T}$ are the spatial affinity matrix of student and teacher networks extracted from the feature maps of the $l$ -th layer, respectively. $|A|$ denotes the number of elements in the affinity matrix. 
Specifically, the team applies the distillation loss after each SconvLB.", + "bbox": [ + 511, + 90, + 903, + 165 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Except for the distillation loss in the feature space, the team applies a pixel-level distillation loss:", + "bbox": [ + 511, + 167, + 903, + 198 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nL _ {T S} = \\left\\| \\mathcal {T} \\left(I _ {L R}\\right) - \\mathcal {S} \\left(I _ {L R}\\right) \\right\\| _ {1}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 602, + 209, + 903, + 227 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\mathcal{T}$ and $S$ denote the teacher network and the student network, respectively. $I_{LR}$ denotes the LR image.", + "bbox": [ + 511, + 239, + 903, + 268 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "They also apply the $L_{2}$ loss:", + "bbox": [ + 532, + 268, + 723, + 284 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nL _ {r e c} = \\left\\| I _ {H R} - \\mathcal {S} \\left(I _ {L R}\\right) \\right\\| _ {2} ^ {2}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 616, + 296, + 903, + 314 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $I_{HR}$ denotes the ground truth high-resolution image. The overall loss is:", + "bbox": [ + 511, + 325, + 903, + 354 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nL _ {t o t a l} = \\lambda_ {1} L _ {r e c} + \\lambda_ {2} L _ {T S} + \\lambda_ {3} L _ {A D}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 581, + 369, + 903, + 386 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Training Details. The team uses DIV2K and LSDIR for training. Random flipping and random rotation are used for data augmentation. 
The training process is divided into two stages.", + "bbox": [ + 511, + 398, + 903, + 458 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Stage One: HR patches of size $192 \\times 192$ are randomly cropped from HR images, and the mini-batch size is set to 8. The model is trained by minimizing the $L_{total}$ mentioned above with the Adam optimizer. The learning rate is $1 \\times 10^{-4}$ . A total of $30k$ iterations are trained.", + "2. Stage Two: In the second stage, the team increases the size of the HR image patches to $256 \\times 256$ , with other settings remaining the same as in the first stage." + ], + "bbox": [ + 513, + 459, + 903, + 580 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Throughout the entire training process, they employ an Exponential Moving Average (EMA) strategy to enhance the robustness of training.", + "bbox": [ + 511, + 580, + 903, + 626 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. XiaomiMM", + "text_level": 1, + "bbox": [ + 513, + 636, + 638, + 651 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Method Details. The team proposes an accelerated variant of the Swift Parameter-free Attention Network (SPAN) [112], called SPANF, which is built upon the fundamental SPAB block. To enhance the inference speed, SPANF introduces several key modifications compared to the original SPAN model. Firstly, they remove the last SPAB block, which reduces computational complexity without significantly impacting performance. Secondly, they increase the number of channels to 32, providing a better balance between model capacity and speed. Thirdly, they replace the first convolution layer with a nearest neighbor upsampling operation, which is computationally less intensive and accelerates the upsampling process. Lastly, they implement simple modifications to the shortcut connections within the network to further streamline computations. 
These changes collectively enable SPANF to achieve faster", + "bbox": [ + 511, + 657, + 906, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8ac9e00d1996213e6f79f7b908791efe8ef055eead3451ed8eaed1fab8097e08.jpg", + "image_caption": [ + "Figure 1. Team EMSR: The team incorporates ConvLoras into the network to increase the performance without adding extra complexity." + ], + "image_footnote": [], + "bbox": [ + 94, + 93, + 903, + 291 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/aad7982650cb376bfab88d1f41e6514c27a63d4ddf9e751f866833b7ce6411d9.jpg", + "image_caption": [ + "Figure 2. The proposed SPANF architecture. The main structure is basically the same as SPAN [112], but one SPAB module is reduced, and the number of channels is 32." + ], + "image_footnote": [], + "bbox": [ + 96, + 342, + 475, + 502 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "inference speeds while maintaining competitive image quality. The evaluations on multiple benchmarks demonstrate that SPANF not only upholds the efficiency of SPAN's parameter-free attention mechanism but also offers superior speed, making it highly suitable for real-world applications, particularly in scenarios with limited computational resources.", + "bbox": [ + 89, + 594, + 482, + 698 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Implementation Details. The dataset utilized for training comprises of DIV2K and LSDIR. During each training batch, 64 HR RGB patches are cropped, measuring $256 \\times 256$ , and subjected to random flipping and rotation. The learning rate is initialized at $5 \\times 10^{-4}$ and undergoes a halving process every $2 \\times 10^{5}$ iterations. The network undergoes training for a total of $10^{6}$ iterations, with the L1 loss function being minimized through the utilization of the Adam optimizer [54]. 
They repeated the aforementioned training settings four times after loading the trained weights. Subsequently, fine-tuning is executed using the L1 and L2 loss functions, with an initial learning rate of $1 \\times 10^{-5}$ for $5 \\times 10^{5}$ iterations, and HR patch size of 512. They con", + "bbox": [ + 89, + 704, + 483, + 901 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "duced finetuning on four models utilizing both L1 and L2 losses, and employed batch sizes of 64 and 128. Finally, they integrated these models' parameters to obtain our ultimate model.", + "bbox": [ + 511, + 338, + 906, + 397 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3. ShannonLab", + "text_level": 1, + "bbox": [ + 511, + 407, + 648, + 422 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Method. The method proposed by the team draws inspiration from ECBSR and SPAN. First, they optimized the ECB module by introducing a 1x1 convolutional layer for channel expansion before the input tensor enters the ECB module. After processing, another 1x1 convolution restores the original channel dimensions, while incorporating residual connections. During inference, these components can be merged into a standard 3x3 convolution through reparameterization, thereby enhancing the ECB module's effectiveness without increasing computational overhead. As illustrated in Fig.3, The complete model architecture of TSR comprises a shallow feature extraction convolution, a reconstruction convolution, a PixelShuffle module, and four REECB block which made of stacked optimized ECB.", + "bbox": [ + 511, + 430, + 906, + 642 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Training Details. The model is trained on the DIV2K and LSDIR train dataset with random flipping and rotation applied for data augmentation. The Adam optimizer is consistently employed throughout the training process. 
The entire training process is divided into five steps.", + "bbox": [ + 511, + 642, + 905, + 717 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. HR patches of size $256 \\times 256$ are randomly cropped from HR images, and the mini-batch size is set to 32. L1 loss is used and the initial learning rate is set to 5e-4, with a cosine learning rate decay strategy. The total iterations is 500k.", + "2. HR patches of size $256 \\times 256$ are randomly cropped from HR images, and the mini-batch size is set to 32. L1 and L2 loss is used and the initial learning rate is set to 5e-4, with a cosine learning rate decay strategy. The total iterations is 1000k.", + "3. HR patches of size $512 \\times 512$ are randomly cropped from HR images, and the mini-batch size is set to 64. L2" + ], + "bbox": [ + 511, + 719, + 903, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d8575e44d65aceb2fd23b5bd961f7c359c9fc2f3717d1291c9ae893c01490dc3.jpg", + "image_caption": [ + "Figure 3. Team ShannonLab: The pipeline of TSR." + ], + "image_footnote": [], + "bbox": [ + 98, + 93, + 478, + 138 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "loss is used and the initial learning rate is set to 2e-4, with a cosine learning rate decay strategy. The total iterations is $1000\\mathrm{k}$ .", + "bbox": [ + 89, + 191, + 482, + 234 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "4. HR patches of size $512 \\times 512$ are randomly cropped from HR images, and the mini-batch size is set to 64. L2 loss is used and the initial learning rate is set to 1e-4, with a cosine learning rate decay strategy. The total iterations is 1000k.", + "5. HR patches of size $512 \\times 512$ are randomly cropped from HR images, and the mini-batch size is set to 64. 
L2 loss is used and the initial learning rate is set to 1e-5, with a cosine learning rate decay strategy. The total iterations is 1000k." + ], + "bbox": [ + 89, + 236, + 482, + 385 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.4. TSSR", + "text_level": 1, + "bbox": [ + 89, + 393, + 171, + 409 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Method. They combined the ideas of reparameterization and attention mechanism to design a model that can capture image information in the network and effectively achieve image super-resolution.", + "bbox": [ + 89, + 417, + 482, + 477 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Training Details. The training process is divided into three steps.", + "bbox": [ + 89, + 478, + 482, + 508 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. HR patches of size $256 \\times 256$ are randomly cropped from HR images, and the mini-batch size is set to 64. L1 loss with AdamW optimizer is used and the initial learning rate is set to 0.0005 and halved at every 100k iterations. The total iterations is 500k.", + "2. HR patches of size $256 \\times 256$ are randomly cropped from HR images, and the mini-batch size is set to 64. L1 and L2 loss with AdamW optimizer is used and the initial learning rate is set to 0.0002 and halved at every 100k iterations. The total iterations is 1000k.", + "3. HR patches of size $512 \\times 512$ are randomly cropped from HR images, and the mini-batch size is set to 64. L2 loss with AdamW optimizer is used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 1000k." + ], + "bbox": [ + 89, + 508, + 482, + 734 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.5. mbga", + "text_level": 1, + "bbox": [ + 89, + 742, + 171, + 758 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Architecture. The team proposes the ESPAN, which is based on SPAN [111]. 
Through evaluations of depth-channel combinations in SPAN on an A6000 GPU, they determined that setting the number of channels to 32 yields higher efficiency than 28 channels. To reduce parameters and FLOPs, a depth of 6 was adopted. Additionally, a $9 \\times 9$ convolution replaced the conventional $3 \\times 3$ convolution at the network's input stage since they find that $9 \\times 9$ convolution is faster than $3 \\times 3$ convolution on A6000.", + "bbox": [ + 89, + 763, + 482, + 900 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/8625a75f76c2da91bb4d2e4dae9cd14a3e706d54d637ec9410ab4e46f76d0fe9.jpg", + "image_caption": [ + "Figure 4. Team mbga: General Reparameterization." + ], + "image_footnote": [], + "bbox": [ + 599, + 98, + 823, + 280 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/ffaeb29db7fde13f35fcbf1cb39fb49d6fad1f29499bad6f15b795904e69fbdc.jpg", + "image_caption": [ + "Figure 5. Team mbga: ESPAN with self distillation." + ], + "image_footnote": [], + "bbox": [ + 516, + 333, + 906, + 435 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "General Reparameterization. Inspired by MobileOne [107] and RepVGG [23], the team proposes a generalized reparameterization block (Fig. 4). The block consists of four $1 \\times 1 - 3 \\times 3$ convolution branches, one $1 \\times 1$ convolution branch, and one $3 \\times 3$ convolution branch. Skip connections are omitted due to empirical observations of training instability. While additional duplicated branches or $3 \\times 3 - 1 \\times 1$ convolution branches are feasible, the current configuration is found to offer superior performance consistency during optimization.", + "bbox": [ + 511, + 489, + 906, + 641 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Self distillation and progressive learning. Inspired by RIFE [42], self-distillation is incorporated into their training pipeline. 
The teacher model shares the identical backbone as the student model but includes three extra SPAB blocks appended to the student's backbone (Fig. 5). A self-distillation loss similar to RIFE's formulation is adopted to co-train the teacher and student networks. This design enables the teacher model to learn robust backbone features. After the distillation phase, the student loss and distillation loss components are removed, and the entire teacher model is fine-tuned. Leveraging the pre-trained robust teacher, progressive learning is employed: the extra SPAB blocks are gradually removed from the teacher's backbone, finally resulting in an architecture identical to the original student model.", + "bbox": [ + 511, + 642, + 908, + 867 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Frequency-Aware Loss. Since small models have limited parameters, during training, they should make the model fo", + "bbox": [ + 511, + 869, + 906, + 902 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "cus more on important (or difficult) areas. In their methods, two types of frequency-aware losses are employed. The first type is the DCT loss. They use the discrete cosine transform (DCT) to convert the RGB domain to the frequency domain and then apply the L1 loss to calculate the difference. The other type is the edge loss. They add a blur to the image and then subtract the blurred image from the original one to obtain the high frequency area. Subsequently, the L1 loss is calculated on this high frequency area.", + "bbox": [ + 89, + 90, + 480, + 226 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Training details: The training process contains two stages. And the training dataset is the DIV2K_LSDIR_train. 
General reparameterization is used on the whole process.", + "bbox": [ + 89, + 227, + 480, + 272 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "I. At the first stage, they use self distillation to train the teacher model.", + "bbox": [ + 89, + 272, + 480, + 301 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Step1. The team first trains a 2x super-resolution model. HR patches of size 256x256 are randomly cropped from HR images, and the mini-batch size is set to 64. L1 loss and self distillation loss with AdamW optimizer are used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 500k. This step is repeated twice. And then they follow the same training setting and use 2x super-resolution model as pretrained model to train a 4x super-resolution model. This step is repeated twice.", + "- Step2. HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. MSE loss, frequency-aware loss and self distillation loss with AdamW optimizer are used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 500k. This step is also repeated twice.", + "- Step3. They only train the teacher model. HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. MSE loss and frequency-aware loss with AdamW optimizer are used and the initial learning rate is set to 0.00005 and halved at every 100k iterations. The total iterations is 500k. This step is also repeated twice." + ], + "bbox": [ + 89, + 305, + 482, + 667 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "II. At the second stage, they use progressive learning to get the final student model.", + "bbox": [ + 89, + 671, + 482, + 700 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Step4. They drop the additional SPAB block one by one. 
HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. L1 loss with AdamW optimizer are used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 500k.", + "- Step5. They repeat the following training process many times until convergence. HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. MSE loss and frequency-aware loss with AdamW optimizer are used and the initial learning rate is set to 0.00005 and halved at every 100k iterations. The total iterations is 500k." + ], + "bbox": [ + 89, + 704, + 482, + 898 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.6. VPEG_C", + "text_level": 1, + "bbox": [ + 513, + 90, + 620, + 104 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "General Method Description. As illustrated in Fig. 6, they propose a Dual Attention Network (DAN) for the lightweight single-image super-resolution task. The core components of DAN consist of three parts: a Local Residual Block (LRB), a Spatial Attention Block (SAB), and a Channel Attention Block (CAB).", + "bbox": [ + 511, + 112, + 903, + 202 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Local Residual Block (LRB). They leverage the $1 \\times 1$ convolution layers followed by a $3 \\times 3$ depthwise convolution as the basic unit, repeated three times. Specially, GELU activation is applied on each layers, and the features are passed in a densely connected manner. At the end of the block, feature maps from different levels are aggregated using channel concatenation, effectively capturing local image details.", + "bbox": [ + 511, + 203, + 903, + 308 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Spatial Attention Block (SAB). They adopt the spatial attention design of SMFANet [144], which employs a variance-constrained feature modulation mechanism to aggregate spatial feature. 
This allows efficient spatial interaction with minimal computational cost.", + "bbox": [ + 511, + 309, + 903, + 383 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Channel Attention Block (CAB). Global channel-wise information is modeled through a self-gating mechanism that enhances local representations and increases model non-linearity. This is followed by a key-value shared MDTA [132] for global interaction and a GDFN [132] for feature refinement.", + "bbox": [ + 511, + 383, + 903, + 473 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Training Description. The proposed DAN consists of 6 feature mixing modules with 16 channels. The training process is divided into two stages:", + "bbox": [ + 511, + 474, + 903, + 520 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Pre-training Stage: They pre-train DAN using 800 images from the DIV2K [100] and the first 10K images of the LSDIR [64] datasets. The cropped LR image size is $72 \\times 72$ , and the mini-batch size is set to 64. The DAN is trained by minimizing L1 loss and the frequency loss[14] with Adam optimizer for total 800, 000 iterations. The initial learning rate is set to 2e-3 and halved at 200K, 400K, 600K, 700K.", + "2. Fine-tuning Stage: They fine-tune the model on the 800 images of DIV2K [100] and the first 10K images of the LSDIR [64] datasets. The cropped LR image size is $72 \\times 72$ , and the mini-batch size is set to 64. The DAN is trained by minimizing PSNR loss with the Adam optimizer for total 200, 000 iterations. They set the initial learning rate to 5e-4 and halve it at 50K, 100K, 150K, and 175 K." + ], + "bbox": [ + 506, + 522, + 903, + 762 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.7. XUPTBoys", + "text_level": 1, + "bbox": [ + 513, + 772, + 635, + 787 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "General Method Description. 
The XUPTBoys team proposed the Frequency-Guided Multilevel Dispersion Network (FMDN), as shown in Fig. 7.FMDN adopts a similar basic framework to [45, 67, 71, 81].", + "bbox": [ + 511, + 794, + 903, + 854 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Based on the above analysis, they propose the new Frequency-Guided Multi-level Dispersion Block(FMDB) and the new Frequency-Guided Multi-level Dispersion", + "bbox": [ + 511, + 854, + 903, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/36311b6faf268e669dcd643228db99b83d29abb2a47e421a048368d4aa625818.jpg", + "image_caption": [ + "Figure 6. Team VPEG_C: An overview of the DAN." + ], + "image_footnote": [], + "bbox": [ + 155, + 106, + 823, + 402 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/bd994d0588e6812fed53823ee522107759f02c0a3d97cd1a32939c53e159da5f.jpg", + "image_caption": [ + "Figure 7. Team XUPTBoys: The whole framework of Frequency-Guided Multi-level Dispersion Network (FMDN)." + ], + "image_footnote": [], + "bbox": [ + 129, + 448, + 816, + 569 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Block Basic(FMDB-B) as the base block of FMDN. As shown in Fig. 8 they use Hierarchical Variance-guided Spatial Attention(HVSA), Reallocated Contrast-Aware Channel Attention (RCCA) as alternatives to Enhanced Spatial Attention (ESA) [73] and Contrast-Aware Channel Attention (CCA) [44], Frequency-Guided Residual block (FRB), Asymmetric FeedForward Network (AFFN), Multilevel Residual Convolution (MRConv) and Multilevel Residual Convolution Basic(MRConv-B). 
The difference between FMDB and FMDB-B is that the former uses MRConv, while the latter uses MRConv-B.", + "bbox": [ + 89, + 619, + 483, + 787 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In HVSA, the effects of multilevel branching and local variance on performance are examined. Small-window multilevel branches fail to capture sufficient information, while local variance within a single branch can create significant weight disparities. To address these issues, [81] was enhanced to introduce the D5 and D7 branches, which effectively utilize local variance to capture information", + "bbox": [ + 89, + 794, + 483, + 902 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "rich regions while balancing performance and complexity. In RCCA, this approach improves the traditional channel attention mechanism by not only reallocating weights across channels but also better managing shared information among them. Introduces complementary branches with $1 \\times 1$ convolutions and GELU activation functions, which help redistribute complementary information, improving the uniqueness of each channel. In FRB, it enhances feature representation using convolutional layers and GELU activation. It normalizes input, extracts features with depthwise convolutions of different kernel sizes, and combines them through residual connections to preserve spatial information for effective image processing. In AFFN, it applies layer normalization and a $1 \\times 1$ convolution to expand feature dimensions. It then uses two depthwise convolutions with different kernel sizes, combines the results with GELU activation, and projects the output back to the original dimension with a residual connection. 
In MRConv and", + "bbox": [ + 511, + 619, + 906, + 893 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/44d502a6c0ed805d464c3c6f19148e6c1e459ecc1aa0d51d667b715fea387a24.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 102, + 89, + 555, + 172 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/6ea31666372ce72972d1e01c9f83342103a7a680943cd9cd751793bd9f5c5350.jpg", + "image_caption": [ + "(f) MRCov-B" + ], + "image_footnote": [], + "bbox": [ + 563, + 90, + 841, + 175 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/c2192d342d64556a1a2860e014da3529e5c2dc30bbd7246a3a3f93bc4b03ebb7.jpg", + "image_caption": [ + "(b) HVSA" + ], + "image_footnote": [], + "bbox": [ + 125, + 176, + 369, + 443 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/d0ea7d9b2ecaf56aa13a4f1b039a13ff28f10a58d64cc690912ef0f4e806004e.jpg", + "image_caption": [ + "(c) FRB" + ], + "image_footnote": [], + "bbox": [ + 392, + 196, + 516, + 441 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/b758339bd61eafff580164a4527468d8f542327ebe7a1f14835a8397e41c3165.jpg", + "image_caption": [ + "(d) AFFN", + "Figure 8. Team XUPTBoys: The details of each component. 
(a) FMDB: Frequency-Guided Multi-level Dispersion Block; (b) HVSA: Hierarchical Variance-guided Spatial Attention; (c) FRB: Frequency-Guided Residual Block; (d) AFFN: Asymmetric FeedForward Network; (e) RCCA: Reallocated Contrast-aware Channel Attention; (f) MRConv-B/MRConv: Multilevel Residual Convolution Basic and Multilevel Residual Convolution" + ], + "image_footnote": [], + "bbox": [ + 537, + 217, + 648, + 429 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/b48235d2ae7a52f0f87af13e385c348076ea1d6bd0051f75995284647bdfd624.jpg", + "image_caption": [ + "(e) RCCA" + ], + "image_footnote": [], + "bbox": [ + 666, + 224, + 810, + 421 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "MRConv-B, MRConv and MRConv-B use convolution kernels of different sizes for parallel convolution, and finally activate the output using GELU and combine it with residual connections, effectively preserving spatial information.", + "bbox": [ + 89, + 558, + 482, + 619 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Training Description. The proposed FMDN has 3 FMDB-Basic blocks and 1 FMDB block, in which the number of feature channels is set to 24. The details of the training steps are as follows:", + "bbox": [ + 89, + 623, + 483, + 684 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Pretraining on the DIV2K [102] and and Flickr2K [70]. HR patches of size $256 \\times 256$ are randomly cropped from HR images, and the mini-batch size is set to 64. The model is trained by minimizing the L1 loss function [77] with the Adam optimizer [53]. The initial learning rate is set to $2 \\times 10^{-3}$ and halved at $\\{100k, 500k, 800k, 900k, 950k\\}$ -iteration. The total number of iterations is $1000k$ .", + "2. Finetuning on 800 images of DIV2K and the first 10k images of LSDIR [64]. HR patch size and mini-batch size are set to $384 \\times 384$ and 64, respectively. 
The model is fine-tuned by minimizing L2 loss function [77]. The initial learning rate is set to $5 \\times 10^{-4}$ and halved at $\\{500k\\}$ -iteration. The total number of iterations is" + ], + "bbox": [ + 89, + 688, + 483, + 901 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "1000k.", + "bbox": [ + 535, + 558, + 584, + 571 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.8. HannahSR", + "text_level": 1, + "bbox": [ + 513, + 587, + 635, + 602 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "General Method Description. The architecture of the proposed network is depicted in Fig. 9, which is inspired by previous studies such as AGDN [114], MDRN [80] and SPAN [109]. They propose a Multi-level Refinement and Bias-learnable Attention dual branch Network (MRBAN). More specifically, they build upon the AGDN framework by constructing another branch consisting of one $3 \\times 3$ convolution layer (ISRB) and one $1 \\times 1$ convolution layer to enhance the overall performance in a learnable way. Meanwhile, they replace the concat module in the AGDN with a direct element-wise summation, for the sake of harvesting significant savings of the parameters.", + "bbox": [ + 511, + 611, + 906, + 792 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In addition, they propose the multi-level refinement and bias-learnable attention block (MRBAB) as the basic block of our network. As described in Figure 10, they attempt to minimize the information loss induced by Sigmoid module. 
When confronted with a negative input with a large absolute value, the output of the Sigmoid module will be approximately equal to zero, which results in remarkable", + "bbox": [ + 511, + 795, + 908, + 902 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/083bbb9f17a8948f4ae5b792fe6c480e191cd46a4ccd5b188e6f518badad2507.jpg", + "image_caption": [ + "Figure 9. Team HannahSR: The overall architecture of Multi-level Refinement and Bias-learnable Attention Dual Branch Network (MR-BAN)." + ], + "image_footnote": [], + "bbox": [ + 98, + 90, + 898, + 200 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/39e5ebdf23d3857b266c36552ee68e47923ad4ffcfb55aa17c77ee301d9f96b3.jpg", + "image_caption": [ + "(a) Team HannahSR: The MRBAB architecture." + ], + "image_footnote": [], + "bbox": [ + 160, + 258, + 836, + 402 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/308e52fdb5c90b5da45b454cd7914587960c012e3e6058e86a7865d419d13375.jpg", + "image_caption": [ + "(b) Team HannahSR: The MRBA architecture.", + "Figure 10. Team HannahSR: The detailed architecture of the network MRBAN. (a) MRBAB: Multi-level Refinement and Bias-learnable Attention Block; (b) MRBA: Multi-level Refinement and Bias-learnable Attention; Other components: BSRB: Blueprint Shallow Residual Block [66]; BSConv: Blueprint Separable Convolution [66]; RCCA: Reallocated Contrast-aware Channel Attention [114]; SGSA: Sparse Global Self-attention [114]." + ], + "image_footnote": [], + "bbox": [ + 148, + 435, + 854, + 720 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "information loss. To address this issue, SPAN [109] used an origin-symmetric activation function. 
They added a bias of $-0.5$ to the Sigmoid function, which allowed the information carried by negative inputs to be taken into account.", + "bbox": [ + 89, + 839, + 485, + 898 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "However, when dealing with the larger positive inputs, their outputs would be approximately equal to 0.5. When compared with the original 1.0, they inevitably suffered from significant information loss. To tackle this issue, they set the", + "bbox": [ + 511, + 839, + 908, + 900 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "negative bias as a learnable parameter so that it can be updated dynamically during the training process to optimally boost the accuracy performance.", + "bbox": [ + 89, + 90, + 482, + 136 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Eventually, they adopt the reparameterization technique. They replace the first $3 \\times 3$ convolution layer with identical scale reparameterization block to extract richer local features for supplying the following layers with more valuable information, while standardizing the number of channels to an identical scale for lightweight super resolution networks to prevent incurring inappropriate model capacity increments.", + "bbox": [ + 89, + 137, + 482, + 257 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Training Strategy. The proposed MRBAN consists of 4 MRBAB, and the feature channel is set to 32. They adopt a four-step training strategy. The details of the training steps are as follows:", + "bbox": [ + 89, + 258, + 483, + 318 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Pretraining on the DIV2K [2] and Flickr2K [69] datasets with the patch size of $256 \\times 256$ and the mini-batch size is set to 64. 
The MRBAN is trained by minimizing the L1 loss function with the Adam optimizer. The initial learning rate is set to $3 \\times 10^{-3}$ and halved at $\\{100\\mathrm{k}, 500\\mathrm{k}, 800\\mathrm{k}, 900\\mathrm{k}, 950\\mathrm{k}\\}$ -iteration. The number of iterations is $1000\\mathrm{k}$ .", + "2. Initial fine-tuning on DIV2K and the first 10K images of LSDIR [64]. The patch size is $384 \\times 384$ and the minibatch size is set to 32. The model is trained by minimizing the MSE loss function. The initial learning rate is set to $1.5 \\times 10^{-3}$ and halved at $\\{100\\mathrm{k}, 500\\mathrm{k}, 800\\mathrm{k}, 900\\mathrm{k}, 950\\mathrm{k}\\}$ -iteration. The number of iterations is $1000\\mathrm{k}$ .", + "3. Advanced training on the DIV2K and the whole LSDIR datasets. The patch size is $384 \\times 384$ and the mini-batch size is set to 64. The model is trained by minimizing the MSE loss function. The initial learning rate is set to $8 \\times 10^{-4}$ and halved at $\\{100\\mathrm{k}, 500\\mathrm{k}, 800\\mathrm{k}, 900\\mathrm{k}, 950\\mathrm{k}\\}$ -iteration. The number of iterations is $1000\\mathrm{k}$ . This stage can be repeated twice.", + "4. Final fine-tuning on the DIV2K and the whole LSDIR datasets. The patch size is $448 \\times 448$ and the mini-batch size is set to 128. The model is trained by minimizing the MSE loss function. The initial learning rate is set to $5 \\times 10^{-6}$ and halved at $\\{100\\mathrm{k}, 500\\mathrm{k}, 800\\mathrm{k}, 900\\mathrm{k}, 950\\mathrm{k}\\}$ -iteration. The number of iterations is $1000\\mathrm{k}$ ." + ], + "bbox": [ + 84, + 320, + 482, + 712 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4.9. Davinci", + "text_level": 1, + "bbox": [ + 89, + 726, + 187, + 739 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Final Solution Description. They chose the Swift Parameter-free Attention Network [112] as their base model, the winner of the NTIRE2024 ESR track. 
After trying the evolution pipeline mentioned in SwinFIR [133], the content decoupling strategy proposed in CoDe [31], the pre-training fine-tuning paradigm, and the model compression techniques such as model pruning and knowledge distillation discussed in Ref [51] respectively, they employ the model Pruning of the last layer with $l_{2}$ norm of the baseline and introducing the mixup Augmentation as their final", + "bbox": [ + 89, + 750, + 483, + 900 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/de2cac9923bfa4f52967dd4330cfae9d5dfebaa792800e8bb8bb19662aebe5ea.jpg", + "image_caption": [ + "Figure 11. Team Rochester: They reduce the channel dimension from 48 to 28 from the original design and introduce additional convolution to stabilize the attention feature maps from SPAB blocks. Example input and output are adapted from [99]." + ], + "image_footnote": [], + "bbox": [ + 516, + 87, + 908, + 589 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "proposal to preserve the original parameter distributions as much as possible, termed PlayerAug.", + "bbox": [ + 511, + 664, + 906, + 694 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Training Details. After pruning the SPAN, they train it on the DIV2K_LSDIR mixed training set, cropping the patch size to 512. The random rotation and flip are configured for data augmentation. The Adam [54] optimizer with $\\beta_{1} = 0.9$ and $\\beta_{2} = 0.99$ and the L1 loss function are adopted to optimize the models, and the mini-batch size is set to 32. All the experiments are conducted on 8 L40S GPUs.", + "bbox": [ + 511, + 696, + 908, + 801 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4.10. Rochester", + "text_level": 1, + "bbox": [ + 511, + 816, + 637, + 830 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Method Details. The proposed method, ESRNet, is an improved and more efficient variant of last year's XiaomiMM SPAN network [112]. 
The original SPAN network demonstrated strong generation quality but required", + "bbox": [ + 511, + 839, + 906, + 902 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 924, + 509, + 936 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "complex training tricks and model fusion strategies, making it difficult to reproduce and computationally expensive. In contrast, ESRNet achieves similar performance with significantly reduced computational overhead, enhanced training stability, and improved inference speed.", + "bbox": [ + 89, + 90, + 480, + 167 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Model Architecture. A key aspect of ESRNet's design is its ability to maintain high performance while reducing computational costs. As shown in Fig. 11, their modifications include:", + "bbox": [ + 89, + 194, + 480, + 253 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Retaining the first six SPAN attention blocks as core feature extraction components while introducing a lightweight convolutional layer to refine the extracted feature maps before fusing them with the original features. This modification enhances feature representation while stabilizing the training process.", + "- Reducing the number of feature channels from 48 to 26, leading to a substantial decrease in both model parameters and floating-point operations (FLOPs). This reduction not only lowers GPU memory consumption but also improves inference efficiency without degrading performance.", + "- Improved validation speed, as ESRNet requires fewer computations per forward pass, making it more suitable for real-time applications compared with the baseline method." 
+ ], + "bbox": [ + 91, + 258, + 482, + 500 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Overall, ESRNet has approximately half the number of parameters and FLOPs compared to the baseline EFPN network, yet it maintains a high PSNR score, demonstrating that their modifications achieve an excellent trade-off between efficiency and performance.", + "bbox": [ + 89, + 506, + 482, + 583 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Training Methodology. They train ESRNet on RGB image patches of size $256 \\times 256$ , applying standard augmentation techniques such as random flipping and rotation to enhance generalization. To ensure stable convergence and optimal performance, they adopt a three-stage training strategy:", + "bbox": [ + 89, + 608, + 482, + 700 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Initial Feature Learning: They train the model with a batch size of 64 using Charbonnier loss, a robust loss function that mitigates the effects of outliers. The Adam optimizer is used with an initial learning rate of $2 \\times 10^{-4}$ , which follows a cosine decay schedule.", + "2. Refinement Stage: They progressively decrease the learning rate linearly from $2 \\times 10^{-4}$ to $2 \\times 10^{-5}$ , allowing the model to refine its learned features while maintaining stable gradients.", + "3. Fine-Tuning with L2 Loss: In the final stage, they adopt L2 loss to fine-tune the model, further enhancing detail restoration. The learning rate is further reduced from $2 \\times 10^{-5}$ to $1 \\times 10^{-6}$ for smooth convergence." 
+ ], + "bbox": [ + 91, + 704, + 482, + 900 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "By structuring the training into these stages, they eliminate the need for complex training tricks used in previous approaches while achieving more stable and reliable optimization.", + "bbox": [ + 511, + 90, + 903, + 150 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "One of the most significant advantages of ESRNet is its improved validation time due to its optimized architecture. Compared to the original SPAN network, ESRNet achieves a similar PSNR score while reducing computational complexity. The model requires significantly fewer FLOPs and parameters, leading to a noticeable reduction in inference time and GPU memory usage. This makes ESRNet a practical solution for applications requiring both high-quality generation and efficient computation.", + "bbox": [ + 511, + 152, + 903, + 289 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "4.11.IESR", + "text_level": 1, + "bbox": [ + 513, + 300, + 601, + 314 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Model Design. As for the Efficint Super-Resolution competition, they proposed the Inference Efficient Super-Resolution Net (IESRNet). IESRNet is not a specific network, but a bag of tricks to make a Super-Resolution Network infer more Efficient on a GPU. They will apply these tricks based on DIPNet [128], which won the first place on the NTIRE2023 ESR challenge in runtime track [65]. The specific structure of IESRNet is shown in Fig. 12. They will describe the tricks they used in detail below.", + "bbox": [ + 511, + 323, + 903, + 458 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Remove bias in Conv. The bias add of the convolution is a relatively inefficient operation in the convolution layer. It only occupies a small part of the FLOPs in the convolution, but occupies $15\\%$ or more of the runtime. 
They removed the bias of all convolutional layers except the ESA module, and the PSNR loss was less than 0.01db.", + "2. Less Residual Connection. Although residual connection helps the model converge during training, too many residual structures will introduce many additional operations, reducing the inference efficiency of the model. Therefore, they replace the two middle RRFB in DIPNet with reparameterization no residual block(RNRB) to balance the trade-off between inference efficiency and model accuracy.", + "3. Standard number of Conv channels. Since the convolution operator has different performance optimizations for different configurations, generally, convolutions with a standard number of channels (such as 32, 48, and 64) are more deeply optimized and therefore occupy higher inference efficiency on the GPU. Based on NVIDIA V100 GPU testing, a 48-channel $3^{*}3$ convolution is even faster than a 30-channel convolution, although the FLOPs is over doubled. For this reason, they set the number of feature channels to 32, and the number of ESA channels to 16.", + "4. Efficient activation function. They replace all activation functions in the network with SiLU [27], which performs well in super-resolution tasks and significantly outperforms the RELU. In addition to its great performance, SiLU is also very fast when inferring on GPUs due to its computational characteristics." + ], + "bbox": [ + 511, + 460, + 903, + 898 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 924, + 506, + 935 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/f7a459c9d7c6eda2acf426998d26a40c8bebbe250c1ca7ffe0217eb5634b2e71.jpg", + "image_caption": [ + "Figure 12. Team IRSR: The overview of the proposed IESRNet. The IESRNet is built based on DIPNet [128]." + ], + "image_footnote": [], + "bbox": [ + 89, + 85, + 911, + 460 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "5. Reparameterization. 
They adopt re-parameterization to enhance the representation capabilities of the model. They use complex re-parameterization structures to train during training and merge them into regular convolutions during inference without incurring additional computational overhead. The specific rep-structure is shown in Fig. 12(c).", + "bbox": [ + 88, + 512, + 480, + 618 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Implementation Details. The training dataset consists of DIV2K and the first 15,000 images of LSIDR [64]. Random flipping and rotation are adopt for Data Augmentation. They adopt a multi-stage training paradigm to train their super-resolution network. The details of training steps are as follows:", + "bbox": [ + 88, + 619, + 482, + 710 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Initial training: HR patches of size $256 \\times 256$ are randomly cropped from HR images. They set the mini-batch as 128. The model is trained by minimizing the PSNR loss with the Adam optimizer. The initial learning rate is set to 5e-4, and halved per 200k iterations. The total number of iterations is 1000k.", + "2. Warm-Start Training: Load the pre-trained weight and train it three times with the same setting.", + "3. Finetune with increasing patch size: In this process, the training patch size is progressively increased to improve the performance, which is selected from [384, 512, 640]. For each patch size, they finetune the network with $1000\\mathrm{k}$" + ], + "bbox": [ + 89, + 714, + 482, + 900 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "iterations. And the initial learning rate is correspondingly selected from [2e-4, 1e-4, 5e-5]. The batch size decreases to 64 for saving GPU memory. All experiments are conducted on 8 NVIDIA V100 GPUs.", + "bbox": [ + 511, + 512, + 906, + 571 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "4.12. 
ASR", + "text_level": 1, + "bbox": [ + 511, + 587, + 596, + 602 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Model Design. The network architecture is built based on DIPNet [128], which won the first place on the NTIRE2023 ESR challenge runtime track [65]. They made several modifications to make it more efficient while maintaining the excellent performance. They call it DIPNetSlim.", + "bbox": [ + 511, + 611, + 905, + 686 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "First of all, they did not use pruning as DIPNet dose. Although it can decrease the model parameters, it will degrade the inference speed of the model due to the irregular number of convolution channels. These operator configurations are not deeply optimized. For this reason, they set the number of feature channels to 32, and the number of ESA channels to 16. Second, they re-parameterize all 3x3 convolutional layers in the network. They adopt re-parameterization to enhance the expressiveness of the model. They use complex re-parameterization structures to train during training and merge them into regular convolutions during inference without incurring additional infer overhead. In addition, they changed the last convolution before the residual connection from 3x3 to 1x1, saving parameters while retain-", + "bbox": [ + 511, + 689, + 906, + 900 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "ing the ability of feature normalization. Finally, they replace all activation functions in the network with SiLU [27], which performs well in super-resolution tasks and significantly outperforms the RELU.", + "bbox": [ + 89, + 90, + 480, + 151 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Implementation Details. The training dataset consists of DIV2K [103] and the first 15,000 images of LSIDR. 
The details of training steps are as follows:", + "bbox": [ + 89, + 152, + 482, + 196 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Initial Training: HR patches of size $256 \\times 256$ are randomly cropped from HR images. They set the mini-batch as 128. The model is trained by minimizing the PSNR loss with the Adam optimizer. The initial learning rate is set to 5e-4, and halved per 200k iterations. The total number of iterations is 1000k.", + "2. Warm-Start Training: Load the pre-trained weight and train it three times with the same setting.", + "3. Finetune with increasing patch size: In this process, the training patch size is progressively increased to improve the performance, which is selected from [384, 512, 640]. For each patch size, they finetune the network with $1000k$ iterations. And the initial learning rate is correspondingly selected from [2e-4, 1e-4, 5e-4]. The batch size decreases to 64 for saving GPU memory." + ], + "bbox": [ + 89, + 199, + 480, + 426 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "4.13. VPEG_O", + "text_level": 1, + "bbox": [ + 89, + 438, + 207, + 452 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "General Method Description. They introduce SAFMnV3, an enhanced version of SAFMN [96] for solving real-time image SR. This solution is mainly concentrates on improving the effectiveness of the spatially-adaptive feature modulation (SAFM) [96] layer. Different from the original SAFMN, as shown in Fig 13, the simplified SAFM layer is able to extract both local and non-local features simultaneously without channel splitting. 
Within this module, they use two $3 \\times 3$ convolutions to project the input and use variance-constrained feature modulation operator [144] in branches with fewer channels, and finally aggregate these two parts of the feature, then refine the aggregated features via a feed-forward neural network.", + "bbox": [ + 88, + 460, + 482, + 655 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Training Description. The proposed SAFMNv3 consists of 6 feature mixing modules, and the number of channels is set to 40. They rain the network on RGB channels and augment the training data with random flipping and rotation. Following previous methods, the training process is divided into three stages:", + "bbox": [ + 88, + 657, + 482, + 748 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. In the first stage, they randomly crop $256 \\times 256$ HR image patches from the selected LSIDR [64] dataset, with a batch size of 64. The proposed SAFMNv3 is trained by minimizing L1 loss and the frequency loss[14] with Adam optimizer for total 800, 000 iterations. The initial learning rate is set to 2e-3, with a Cosine Annealing scheme [78].", + "2. In the second stage, they increase the size of the HR image patches to $384 \\times 384$ . The model is fine-tuned on the DF2K [100] by minimizing Charbonnier loss function." + ], + "bbox": [ + 84, + 750, + 482, + 900 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The initial learning rate is set to 5e-4, and the total iterations is $500\\mathrm{k}$", + "bbox": [ + 526, + 90, + 903, + 119 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "3. In the third stage, the batch size is set to 64, and PSNR loss is adopted to optimize over $300\\mathrm{k}$ iterations. 
The initial learning rate is set to 5e-5.", + "bbox": [ + 506, + 121, + 903, + 167 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Throughout the training process, they also employ an Exponential Moving Average (EMA) strategy to enhance the robustness of training.", + "bbox": [ + 511, + 170, + 903, + 215 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "4.14.mmSR", + "text_level": 1, + "bbox": [ + 511, + 236, + 612, + 250 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Method. They improve the model based on SAFMN++ [91] and name it FAnet as shown in Fig. 14. Compared to SAFMN++, their model achieves a higher PSNR with a lower computational cost. Unlike the original SAFMN++ method, they introduce modifications in both the data and model structure. In terms of model structure, as shown in the figure, they improve the Feature Mixing Module of the original architecture and incorporate the concept of reparameterization, designing the RFMM. They modify the convolutional extraction network preceding the original module into a parallel structure to accommodate multi-granularity feature extraction and apply re-parameterization [23] during inference. Furthermore, they adjust the downsampling factor in SimpleSAFM to 16 to achieve lower computational complexity. Regarding the data, in addition to utilizing the provided training dataset, they analyze the superresolution results of the model and identify common issues in fine-detail generation. Given constraints on model parameters and computational resources, it is impractical for a lightweight model to generate details identical to the ground truth. Therefore, they shift their focus to expanding the training dataset. Specifically, they use 10,800 images from the training dataset as input and employ convolutional neural networks such as Omni-SR [113] to generate new images. 
This additional data is incorporated into the training process to facilitate learning and mitigate the risk of learning bias caused by excessive learning difficulty.", + "bbox": [ + 511, + 262, + 903, + 669 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Training Details. They train their model on the DIV2K [100], Flickr2K [70], and LSDIR [64] datasets. The cropped low-resolution (LR) image size is set to 64 × 64 and subjected to random flipping and rotation. The FAnet model is optimized using the Adam optimizer with L1 loss minimization in a multi-stage training scheme. During the training phase, they set the initial learning rate to $2 \\times 10^{-3}$ and the minimum learning rate to $1 \\times 10^{-6}$ , training for 500,000 iterations with a mini-batch size of 512. In finetuning stage, Initialized with training phase weights, they fine-tune the model with the given training dataset and additional dataset which is proposed as above. They finetune the model using a learning rate of $1 \\times 10^{-4}$ and the minimum learning rate set to $1 \\times 10^{-6}$ , with a mini-batch size of 64.", + "bbox": [ + 511, + 674, + 906, + 898 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/8cf587e3cbc1927fbca4656b8736d9f84ac6f220c9227c1ad73401744af36b10.jpg", + "image_caption": [ + "Figure 13. Team VPEG_O: An overview of the proposed SAFMNv3." + ], + "image_footnote": [], + "bbox": [ + 99, + 92, + 898, + 220 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/9bbcbdf88644d05c0209ff8adeee4dc89fcb240fb6ca41121a1750176f9fa5bd.jpg", + "image_caption": [ + "Figure 14. Team mmSR: The overall network architecture of FAnet." + ], + "image_footnote": [], + "bbox": [ + 93, + 273, + 480, + 452 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "4.15. 
ChanSR", + "text_level": 1, + "bbox": [ + 89, + 517, + 202, + 532 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "General Method Description. They propose the Edge Enhanced Convolutional Network (EECNet) for the efficient super-resolution task. The network architecture is inspired by the design of SRN [118], while fully exploring the capacity of reparameterizable convolution. The whole architecture is shown in Fig. 15(a). They introduce a predefined High-Pass Filter (HPF) branch to explicitly capture edge details, formulated as:", + "bbox": [ + 89, + 540, + 483, + 661 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {K} _ {h p f} = \\frac {1}{1 6} \\left[ \\begin{array}{r r r} - 1 & - 2 & - 1 \\\\ - 2 & 1 2 & - 2 \\\\ - 1 & - 2 & - 1 \\end{array} \\right]. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 675, + 482, + 724 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Then they integrate the proposed HPF into the EDBB [116], creating the subEEC module. As subEEC can be mathematically equivalent to a standard $3 \\times 3$ convolution, they replace the original $3 \\times 3$ convolution in RRRB [25] with our subEEC to obtain the final EEC architecture, whose structure is shown in Fig. 15(b). Notably, to ensure valid re-parameterization, they initialize the bias of the first convolution layer as zero to compensate for the zeropadding operation in subEEC.", + "bbox": [ + 89, + 733, + 482, + 868 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "To better capture global spatial information, they adopt the simplified Efficient Spatial Attention mechanism from", + "bbox": [ + 89, + 869, + 483, + 900 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "SRN [118], whose structure is shown in Fig. 15(c). 
Compared with the original ESA, this implementation removes the $1 \\times 1$ convolution layer and reduces computational complexity by employing only a single $3 \\times 3$ convolution in the convolutional group.", + "bbox": [ + 511, + 277, + 903, + 353 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Training Description. The proposed EECNet contains eight EEBs, in which they set the number of feature maps to 32. Also, the channel number of the ESA is set to 16 similar to [56]. Throughout the entire training process, they use the Adam optimizer [54], where $\\beta 1 = 0.9$ and $\\beta 2 = 0.999$ . The model is trained for $1000k$ iterations in each stage. Input patches are randomly cropped and augmented. Data augmentation strategies included horizontal and vertical flips, and random rotations of 90, 180, and 270 degrees. Model training was performed using Pytorch 1.12.0 [85] on RTX 3090. Specifically, the training strategy consists of several steps as follows.", + "bbox": [ + 511, + 354, + 906, + 532 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. In the starting stage, they train the model from scratch on the 800 images of DIV2K [4] and the first 10k images of LSDIR [64] datasets. The model is trained for a total $10^{6}$ iterations by minimizing L1 loss and FFT loss [15]. The HR patch size is set to $256 \\times 256$ , while the mini-batch size is set to 64. They set the initial learning rate to $1 \\times 10^{-3}$ and the minimum one to $1 \\times 10^{-5}$ , which is updated by the Cosine Annealing scheme.", + "2. In the second stage, they increase the HR patch size to 384, while the mini-batch size is set to 32. The model is fine-tuned by minimizing the L1 loss and the FFT loss. They set the initial learning rate to $5 \\times 10^{-4}$ and the minimum one to $1 \\times 10^{-6}$ , which is updated by the Cosine Annealing scheme.", + "3. 
In the last stage, the model is fine-tuned with $480 \\times 480$ HR patches, however, the loss function is changed to minimize the combination of L2 loss and FFT loss [15]. Other settings are the same as Stage 2." + ], + "bbox": [ + 511, + 535, + 905, + 808 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "4.16. Pixel Alchemists", + "text_level": 1, + "bbox": [ + 511, + 818, + 687, + 832 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Network Architecture. The overall architecture of team Pixel Alchemists is shown in Fig. 16. They propose a novel architecture named resolution-consistent UNet (RCUNet). The proposed network consists of four deep feature comple", + "bbox": [ + 511, + 839, + 905, + 901 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/e9e1cda4695875e0b22a2ee15705c87b10f30d2b2a898093416b94ff467341a4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 112, + 87, + 883, + 198 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/d48024e2b80356cc2faa2c14f1af7be0760ff89822241d890034e1dceb40e2d0.jpg", + "image_caption": [ + "Figure 15. Team ChanSR: Network architecture of the EECNet." + ], + "image_footnote": [], + "bbox": [ + 112, + 202, + 883, + 385 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "ment and distillation blocks (DFCDB). Inspired by [35, 83], the input feature map is split along the channel dimension in each block. Then, four convolutional layers process one of the split feature maps to generate complementary features. The input features and complementary features are concatenated to avoid loss of input information and distilled by a conv-1 layer. 
Besides, the output feature map of DFCDB is further enhanced by the ESA layer [55].", + "bbox": [ + 88, + 438, + 482, + 559 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Online Convolutional Re-parameterization. Reparameterization [136] has improved the performance of image restoration models without introducing any inference cost. However, the training cost is large because of complicated training-time blocks. To reduce the large extra training cost, online convolutional re-parameterization [41] is employed by converting the complex blocks into a single convolutional layer during the training stage. The architecture of RepConv is shown in Fig. 17. It can be converted to a $3 \\times 3$ convolution during training, which saves the training cost.", + "bbox": [ + 88, + 561, + 482, + 727 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Training Details. The proposed RCUNet has four DFCDBs. The number of features is set to 48, and the number of ESA channels is set to 16.", + "bbox": [ + 89, + 728, + 482, + 771 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "DIV2K [4] and LSDIR [64] datasets are used for training. The training details are as follows:", + "bbox": [ + 89, + 773, + 482, + 805 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "1. The model is first trained from scratch with $256 \\times 256$ patches randomly cropped from HR images from the DIV2K and LSDIR datasets. The mini-batch size is set to 64. The L1 loss and pyramid loss are minimized with the Adam optimizer. The initial learning rate is set to 1e-3 with a cosine annealing schedule. The total number of", + "bbox": [ + 86, + 809, + 482, + 900 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "iterations is 1000k.", + "bbox": [ + 526, + 438, + 656, + 452 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "2 Then the model is initialized with the pre-trained weights of Stage 1. 
The MSE loss and pyramid loss is used for fine-tuning with $512 \\times 512$ HR patches and a learning rate of 1e-5 for 500k iterations.", + "bbox": [ + 511, + 452, + 903, + 513 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/aeb111111f6a66fd1e33711c57d02e8b37f987757aecc478ba83f7f117f8f563.jpg", + "image_caption": [ + "Figure 16. Team Pixel Alchemists: RCUNet Architecture." + ], + "image_footnote": [], + "bbox": [ + 542, + 527, + 879, + 742 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "4.17.LZ", + "text_level": 1, + "bbox": [ + 511, + 801, + 584, + 816 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "General Method Description. To enhance model complexity without increasing computational overhead, they focus on designing structurally simple yet expressively powerful components, notably through re-parameterization techniques. Drawing inspiration from ECBSR [137],", + "bbox": [ + 511, + 824, + 903, + 900 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/e2f754ff416b95f767e85bf2241846e8853135980f1a33ca98e7b3b2dd78f4f5.jpg", + "image_caption": [ + "(a) Online Reparameterization" + ], + "image_footnote": [], + "bbox": [ + 130, + 95, + 450, + 224 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/65bfab3183f17ccd152cdcd70375e893a18feaf358a53ada5a84bdc2975a7327.jpg", + "image_caption": [ + "Figure 17. Team Pixel Alchemists: Online re-parameterization.", + "Figure 18. Team LZ: Detailed architecture of TDESR." + ], + "image_footnote": [], + "bbox": [ + 102, + 333, + 433, + 463 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "their TDESR framework strategically implements reparameterization to improve super-resolution performance while preserving training efficiency. 
Following the reparameterization phase, they employ tensor decomposition for light-weight network design, where standard $3 \\times 3$ convolutions are factorized into sequential $3 \\times 1$ and $1 \\times 3$ convolutional operations.", + "bbox": [ + 89, + 516, + 482, + 622 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "As illustrated in Fig. 18, their architecture comprises five TD Blocks interspersed with three standard $3 \\times 3$ convolutions, implementing a skip connection through elementwise addition between the input features (processed by a $3 \\times 3$ convolution) and intermediate feature maps. The network maintains 64 channels throughout, with tensor decomposition intermediate channels reduced to 32 for computational efficiency. They integrate insights from Swift-SR's parameter-free attention mechanism [112] to enhance feature representation. The final reconstruction stage employs PixelShuffle with 48 input channels for high-quality image upsampling, completing their balanced design of performance and efficiency.", + "bbox": [ + 89, + 623, + 482, + 819 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Training Details. The training details of team LZ are as follows.", + "bbox": [ + 89, + 820, + 482, + 849 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "- Base Training ( $\\times 2$ upscaling) The model is initially trained for $\\times 2$ super-resolution using randomly cropped $96 \\times 96$ HR patches with a batch size of 32. They employ", + "bbox": [ + 91, + 854, + 482, + 901 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/449a23221e1f675fa538f3ab016b13b78bd4d647f4c1a5ea675c158ab5a86d85.jpg", + "image_caption": [ + "Figure 19. Team Z6: Network architecture of GloReNet." 
+ ], + "image_footnote": [], + "bbox": [ + 522, + 87, + 898, + 165 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "the Adam optimizer to minimize the L1 loss, starting with an initial learning rate of $1 \\times 10^{-4}$ that decays via Multi-StepLR scheduler at the mid-training point. The training completes over 100 epochs, utilizing re-parameterization techniques throughout the process.", + "bbox": [ + 524, + 217, + 903, + 292 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Enhanced Resolution Training. Building upon the $\\times 2$ pretrained weights, this phase increases the HR patch size to $128 \\times 128$ while reducing the batch size to 16. All other hyperparameters (optimizer, learning rate schedule, and re-parameterization) remain consistent with Stage 1. The continued use of L1 loss maintains training stability during this resolution scaling phase.", + "- Convolutional Architecture Refinement. They implement standard $3 \\times 3$ convolutional layers in this optimization stage, replacing previous architectural components. The training objective shifts to L2 loss minimization for fine-tuning, while preserving the fundamental network structure and parameter initialization from earlier stages. This transition enhances edge preservation in super-resolved outputs.", + "- Tensor Decomposition Optimization. The final refinement employs tensor decomposition techniques with dual loss supervision $(\\mathrm{L1} + \\mathrm{L2})$ . Training progresses with $256 \\times 256$ HR patches using a reduced batch size of 16 and lower initial learning rate $(1 \\times 10^{-5})$ . They implement cosine annealing scheduling for smooth convergence, completing the multi-stage optimization process through L2-loss-focused fine-tuning.." 
+ ], + "bbox": [ + 513, + 292, + 903, + 640 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "4.18.Z6", + "text_level": 1, + "bbox": [ + 513, + 651, + 580, + 664 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "General Method Description. They introduce a lightweight and efficient image super-resolution (SR) network that leverages both global and local feature attention mechanisms to produce high-quality reconstructions. As depicted in Fig. 19, their network is divided into two main blocks named Global Feature Attention Block (GFAB) and Local Feature Attention Block (LFAB).", + "bbox": [ + 511, + 672, + 903, + 777 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "GFAB is designed to capture large-scale context and dependencies across the entire image. Enhances globally significant features, helping the model learn the global information from input images. And LFAB can focus on refining fine-grained details and spatially localized information. Emphasizes subtle textural elements and sharp edges that are critical for upscaling. GFAB utilizes the parameter-free attention module (SPAN [111]) and LFAB uses Effi", + "bbox": [ + 511, + 779, + 906, + 900 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "cient Spatial Attention (ESA) [72] to selectively highlight essential features. And all convolution layers applied reparameterization block [127]. The network begins with a series of convolution layers to extract initial features, which then pass through GFAB units for global attention. Subsequently, the output is processed by LFAB units for local attention, and finally, a PixelShuffle layer upscales the features to the target resolution. 
By combining these two parts, their method effectively preserves global context and local details, achieving a balance between high-quality reconstruction and efficient low computation.", + "bbox": [ + 89, + 90, + 480, + 256 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Training Description. Their training process employs a scratch training stage and a fine-tuning stage. In the first scratch training stage, they use DIV2K datasets for the training dataset. In the fine-tuning stage, they use DIV2K and the first 10K LSDIR datasets for the training dataset. All experiments are carried out in the same experimental environment. The training process is executed using RTX A6000 GPUs. They use the Pytorch 1.13 version for all training steps.", + "bbox": [ + 89, + 257, + 480, + 393 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Scratch train stage: In the first step, their model is trained from scratch. The LR patches were cropped from LR images with an 8 mini-batch of $256 \\times 256$ . Adam optimizer is used with a learning rate of 0.0005 during scratch training. The cosine warm-up scheduler is used. The total number of epochs is set to 2000. They use the $l1$ loss.", + "- Fine-tuning stage: In the second step, the model is initialized with the weights trained in the first step. To improve precision, they used the loss method $l2$ loss. This stage improves the value of the peak signal-to-noise ratio (PSNR) by $0.05 \\sim 0.06$ dB. In this step, The LR patches are cropped from LR images with 32 mini-batch $512 \\times 512$ sizes. And the initial learning rate is set to 0.00005 and the Adam optimizer is used in conjunction with a cosine warm-up. The total epoch is set to 200 epochs." + ], + "bbox": [ + 89, + 397, + 480, + 622 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "4.19. 
TACO_SR", + "text_level": 1, + "bbox": [ + 89, + 635, + 215, + 648 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "General Method Description. The overall architecture of their network is showed in Fig. 20(a), inspired by SPAN [110] and PFDNLite [91]. Motivated by the design of the Conv3XC module in SPAN, they introduce two additional parallel branches with varying channel expansion ratios, resulting in a novel convolution module termed TenInOneConv, which fuses multiple convolution kernels into a single equivalent kernel to improve inference efficiency. Furthermore, to enhance the model's capability in capturing local texture and detail features, the LocalAttention module, inspired by PFDNLite is integrated, allowing the network to better focus on informative regions within feature maps.", + "bbox": [ + 89, + 657, + 480, + 838 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "TenInOneSR employs four TenInOneBlock modules. Each of these blocks (detailed in Fig. 20(b)) begins with a LocalAttention module, which enhancing the network's ability to capture fine details. Subsequently, each block ap", + "bbox": [ + 89, + 839, + 480, + 900 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "plies three cascaded TenInOneConv layers, interleaved with the SiLU activation function, to perform hierarchical feature refinement. The block concludes with a residual connection, allowing better gradient flow.", + "bbox": [ + 511, + 90, + 903, + 150 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Notably, the behavior of the TenInOneConv differs between the training and inference phases. During training (Fig. 20(d)), TenInOneConv operates in a multi-branch configuration. It introduces three parallel convolutional branches with different channel expansion ratios (gains set as 1, 2, and 3), along with an additional skip connection. 
This multi-scale feature extraction enables the network to better aggregate complementary spatial features.", + "bbox": [ + 511, + 152, + 903, + 273 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In the inference stage (Fig. 20(f)), for computational efficiency and faster runtime, these multiple convolution kernels are fused into a single equivalent convolution kernel. Specifically, the parallel branches and skip connection weights are mathematically combined to form one unified $3 \\times 3$ convolutional kernel, significantly accelerating inference without compromising performance.", + "bbox": [ + 511, + 273, + 903, + 378 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Training description. The proposed architecture is trained on two NVIDIA RTX Titan GPUs with a total of 48 GB memory. In the first training stage, the DIV2K dataset is augmented by a factor of $85 \\times$ and registered into the LSDIR format, resulting in a large-scale training set containing 152,991 high-resolution RGB images. During this stage, training is conducted with 64 randomly cropped $256 \\times 256$ patches per batch, using common augmentations such as random flipping and rotation. The model is optimized using the Adam optimizer with L1 loss for a total of 100,000 iterations. The learning rate is initialized at $5 \\times 10^{-4}$ and decayed by half every 20,000 iterations. In the second stage, they keep the training strategy and hyperparameters unchanged, except for increasing the input patch size to $384 \\times 384$ and reducing the batch size to 32 to fit GPU memory. Then another 100,000 training iterations are conducted to further improve the model's performance on higher-resolution textures.", + "bbox": [ + 511, + 396, + 903, + 667 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "4.20.AIOT.AI", + "text_level": 1, + "bbox": [ + 511, + 681, + 632, + 695 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Method. 
The overall architecture of their network is shown in Fig. 21(a), inspired by the previous leading methods SPAN[112] and ECBSR[138]. They propose an Efficient channel attention super-resolution network acting on space (ECASNet). Specifically, on the basis of SPAB from SPAN, they combine edge-oriented convolution block (ECB) and regularization module (GCT) to form a new reparameterized feature extraction module named enhanced attention and re-parameterization block(EARB), as shown in Fig. 21(b). In addition, unlike SPAN, they find that using channel attention after feature map concatenating can significantly improve performance. For the sake of lightweight design, they use an efficient channel attention", + "bbox": [ + 511, + 703, + 903, + 900 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 924, + 506, + 936 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/06848c39c978127dbf1a5777572509c2538e8cda239227794323ca26f32c9d74.jpg", + "image_caption": [ + "Figure 20. Team TACO_SR: The architecture of proposed TenInOneSR." 
+ ], + "image_footnote": [], + "bbox": [ + 89, + 85, + 330, + 559 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/5aeaf5ee1c5610a62b97273c69623209ee7d7db802eb52f815951d41d89ec85f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 85, + 589, + 353 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/b2dafe71e9e3063b6f4e6e0d7fe0c81118d087eca6c5fbfdf98428625d1d76de.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 614, + 85, + 903, + 318 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/acb7e256b36e27fbb9227c2c97f2747745796ad01d4a69839743b8c4c6ab22db.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 353, + 589, + 559 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/49df396ea37e71a78da20d8f92ed483037fdc7b9986cdfebb18dda7676b67431.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 617, + 328, + 910, + 559 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "module, called the efficient channel attention module which acts on space(CAS), as shown in Fig. 21(c).", + "bbox": [ + 89, + 608, + 482, + 638 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Training Detail. The datasets used for training include DIV2K and LSDIR. Imitating the previous method, the training process is divided into two stages. In the first stage, they randomly crop $256 \\times 256$ HR image blocks from the ground truth image, batch is 16, and randomly flipped and rotated them. Using Adam optimizer, set $\\beta 1 = 0.9$ and $\\beta 2 = 0.999$ , and minimize L1 loss function. The initial learning rate is set to 5e-4, and the cosine learning rate attenuation strategy is adopted. Epoch is set to 200. 
In the second stage, they changed the loss function to L2, and other settings are the same as those in the first stage.", + "bbox": [ + 89, + 638, + 483, + 806 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "4.21.JNU620", + "text_level": 1, + "bbox": [ + 89, + 816, + 199, + 832 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "General Method Description. They propose a reparameterized residual local feature network (RepRLFN) for efficient image super-resolution, which is influenced by existing studies such as RepRFN [19] and RLFN [55]. Fig. 22", + "bbox": [ + 89, + 839, + 483, + 902 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "illustrates the overall architecture of RepRLFN, which has been extensively validated in previous studies.", + "bbox": [ + 511, + 608, + 906, + 638 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "They replace the RLFB in RLFN [55] with their reparameterized residual local feature block (RepRLFB). RepBlock is the main component of RepRLFB, which employs multiple parallel branch structures to extract the features of different receptive fields and modes to improve performance. At the same time, the structural re-parameterization technology is leveraged to decouple the training and inference phases to avoid the problem that computational complexity increases caused by the introduction of multi-branch.", + "bbox": [ + 511, + 640, + 906, + 790 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Training Strategy. The proposed RepRLFN consists of 4 RepRLFBs, with the number of feature channels set to 48. The details of the training steps are as follows:", + "bbox": [ + 511, + 792, + 908, + 838 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "1. In the first stage, the model is pre-trained on DIV2K [4]. HR patches of size $480 \\times 480$ are randomly cropped from HR images, and the mini-batch size is set to 32. 
The model is trained by minimizing the L1 loss function", + "bbox": [ + 511, + 839, + 908, + 901 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/cd71cb66b2605332b1a6f6ce4dc15f144a84d5fa912a642fd7c10063bb2be48b.jpg", + "image_caption": [ + "(b) ECASNet" + ], + "image_footnote": [], + "bbox": [ + 102, + 88, + 869, + 212 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/d5280ba3c0c422f04fb814ae78615d895235df2419f06168de54ab34712b08dd.jpg", + "image_caption": [ + "(b) EARB" + ], + "image_footnote": [], + "bbox": [ + 91, + 255, + 478, + 368 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/f9533c63c96b88d6982d85f2095e9f195b9d8592e0275d4fb83d1ad4cc7289c3.jpg", + "image_caption": [ + "(c) CAS", + "Figure 21. Team AIOT.AI: Detailed architecture of the proposed ECASNet." + ], + "image_footnote": [], + "bbox": [ + 166, + 407, + 361, + 473 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/8bd2ef8051dea56cf5a345b62b7708f0cb0526c294db8e195419e3b299cee319.jpg", + "image_caption": [ + "(d) RepConv" + ], + "image_footnote": [], + "bbox": [ + 500, + 257, + 906, + 470 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "using the Adam optimizer. The initial learning rate is set to 5e-4 and is halved every 200 epochs. The total number of epochs is 800.", + "bbox": [ + 89, + 570, + 482, + 614 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2. In the second stage, the model is fine-tuned on 3450 images from DIV2K [4] and Flickr2k [101] (DF2K) and the first 10k images from LSDIR [64]. HR patches of size $640 \\times 640$ are randomly cropped from HR images, and the mini-batch size is set to 32. The model is fine-tuned by minimizing the L2 loss function. The initial learning rate is set to 2e-4 and is halved every 5 epochs. 
The total number of epochs is 25.", + "3. In the third stage, the model is fine-tuned again on 3450 images from DF2K and the first 10k images from LSDIR [64]. The HR patch size and minibatch size are set to $640 \\times 640$ and 32, respectively. The model is fine-tuned by minimizing the L2 loss function. The initial learning rate is set to 1e-4 and is halved every 5 epochs. The total number of epochs is 20.", + "4. In the fourth stage, the model is fine-tuned on 3450 images from DF2K and the first $10\\mathrm{k}$ images from LSDIR [64]. The HR patch size and minibatch size are set" + ], + "bbox": [ + 89, + 619, + 483, + 900 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "to $640 \\times 640$ and 32, respectively. The model is fine-tuned by minimizing the L2 loss function. The learning rate is set to 5e-5, and the total number of epochs is 10. To prevent over-fitting, the model ensemble via stochastic weight averaging [46] (SWA) is performed during the last 8 epochs to obtain the final model for testing.", + "bbox": [ + 511, + 570, + 906, + 661 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "4.22. LVGroup_HFUT", + "text_level": 1, + "bbox": [ + 511, + 679, + 687, + 695 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "General Method Description. The Swift Parameter-free Attention Network (SPAN) [112] introduces a novel parameter-free attention mechanism to address the tradeoff between performance and computational complexity, as shown in 23. SPAN employs symmetric activation functions (e.g., shifted Sigmoid) applied to convolutional layer outputs to generate attention maps without learnable parameters, enhancing high-contribution features while suppressing redundant information. Residual connections within each Swift Parameter-free Attention Block (SPAB) mitigate information loss and preserve low-level features. 
The lightweight architecture with cascaded SPABs achieves fast inference by avoiding parameter-heavy attention computa", + "bbox": [ + 511, + 703, + 906, + 900 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 924, + 506, + 936 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/dfc46883933c059577cb6e6eeaa96eebd866af9c6728c75dfb5d979abc1dad54.jpg", + "image_caption": [ + "Figure 22. Team JUN620: The network architecture of RepRLFN" + ], + "image_footnote": [], + "bbox": [ + 102, + 89, + 897, + 470 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/de492b7ced705d8f7a88d48385420eb72c09a6323ea94714fef518a990277b96.jpg", + "image_caption": [ + "Figure 23. LVGroup_HFUT: The overall framework of SPAN." + ], + "image_footnote": [], + "bbox": [ + 96, + 526, + 478, + 623 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "tions while maintaining reconstruction quality through hierarchical feature aggregation and pixel-shuffle upsampling.", + "bbox": [ + 89, + 724, + 482, + 755 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Training Details. They trained the SPAN model [112] on a mixed dataset composed of DIV2K [104] and LSDIR [64], setting feature_channels to 48, where the crop size of images is $256 \\times 256$ . They used the Adam optimizer with L1 loss, an initial learning rate of 5e-4, and trained for a total of 1000k iterations, halving the learning rate every 200k iterations. Training was completed using a single NVIDIA RTX 4090 GPU.", + "bbox": [ + 89, + 779, + 483, + 898 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/f9f256e2bdc25f83ac89a801417e8634751d782b6c46300dbea253a6df644900.jpg", + "image_caption": [ + "Figure 24. Team YG: The Spatial-gate self-distillation network (SGSDN)." 
+ ], + "image_footnote": [], + "bbox": [ + 516, + 522, + 903, + 566 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "4.23.YG", + "text_level": 1, + "bbox": [ + 513, + 628, + 586, + 642 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "4.23.1. Method Details.", + "text_level": 1, + "bbox": [ + 513, + 651, + 676, + 664 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "The Primary idea of the proposed SGSDN is to explore nonlocal information in a SA-like manner while modeling local details for efficient image super-resolution. This section will start by introducing the overall architecture of SGSDN and then explain the SGM and ESD in detail.", + "bbox": [ + 511, + 670, + 906, + 744 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Network Architecture The overall structure of the SGSDN is shown in Fig. 24. It consists of three stages: shallow feature extraction, deep feature extraction, and image reconstruction. First, they use a $3 \\times 3$ convolutional layer to extract shallow features, which is expressed as:", + "bbox": [ + 511, + 746, + 906, + 821 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} _ {s} = F _ {\\text {C o n v 3} \\times 3} (\\mathbf {I} _ {L R}), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 632, + 830, + 903, + 847 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "where, $F_{Conv3 \\times 3}$ represents the shallow feature extraction module using a $3 \\times 3$ convolutional layer. The obtained shallow feature is denoted as $\\mathbf{I}_s$ . Subsequently, the extracted", + "bbox": [ + 511, + 854, + 906, + 900 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/cba831f90e681281e70d07836d242412ebf29f6e98714494d5a04829c493e39c.jpg", + "image_caption": [ + "Figure 25. Team YG: The details of each component. 
(a) SGM: Spatial-gate modulation module; (b) ESD: Enhanced self-distillation module." + ], + "image_footnote": [], + "bbox": [ + 153, + 89, + 421, + 207 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "shallow features are fed to several stacked SGSDBs to produce deep representative features, This process can be expressed as:", + "bbox": [ + 89, + 289, + 483, + 333 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} _ {k} = F _ {S G S D B} ^ {k} \\left(\\mathbf {I} _ {k - 1}\\right), k = 1, \\dots , n, \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 161, + 343, + 482, + 361 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "where, $F_{SGSDB}^{k}(\\cdot)$ represents the $k$ -th SGSDB, $\\mathbf{I}_{k-1}$ and $\\mathbf{I}_k$ denote the input and output features of the $k$ -th SGSDB, respectively. Each SGSDB consists of three SGMs and an ESD. Given an input feature $\\mathbf{I}_t$ , the mapping process of SGSDB can be represented as:", + "bbox": [ + 89, + 371, + 483, + 446 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbf {I} _ {d _ {1}} = F _ {S G M} (\\mathbf {I} _ {t}), \\\\ \\mathbf {I} _ {d _ {2}} = F _ {S G M} (\\mathbf {I} _ {d _ {1}}), \\\\ \\mathbf {I} _ {d _ {3}} = F _ {S G M} \\left(\\mathbf {I} _ {d _ {2}}\\right) + \\mathbf {I} _ {t}, \\\\ \\mathbf {I} _ {o} = F _ {E S D} (\\mathbf {I} _ {d _ {3}}) + \\mathbf {I} _ {d _ {3}} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 457, + 480, + 530 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "where, $F_{SGM}$ represents the SGM, $F_{ESD}$ represents the ESD. After the deep feature extraction block, the representative features are processed by a $3 \\times 3$ standard convolution layer and a pixel shuffle operation [94] to reconstruct the high-quality SR image. 
To take advantage of high-frequency information, they insert a long-distance residual connection before the image reconstruction module. The reconstruction stage is described as follows", + "bbox": [ + 89, + 539, + 483, + 660 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} _ {S R} = F _ {\\text {P i x e l S h u f f l e}} \\left(F _ {\\text {C o n v 3} \\times 3} \\left(\\mathbf {I} _ {d} + \\mathbf {I} _ {s}\\right)\\right), \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 127, + 671, + 482, + 686 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "where $\\mathbf{I}_d$ denotes the deep feature obtained by the stacked SGSDBs, and $F_{Conv3\\times 3}(\\cdot)$ indicates the $3\\times 3$ standard convolution layer. $F_{PixelShuffle}(\\cdot)$ is used to upscale the final feature and output the SR reconstructed image $\\mathbf{I}_{SR}$ .", + "bbox": [ + 89, + 696, + 482, + 758 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Finally, to train the network, they use the $L_{1}$ loss function to minimize the pixel-level difference between the ground truth image $\\mathbf{I}_{GT}$ and the reconstructed image $\\mathbf{I}_{SR}$ , which can be expressed as:", + "bbox": [ + 89, + 758, + 483, + 818 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\nL _ {1} = \\left\\| \\mathbf {I} _ {S R} - \\mathbf {I} _ {G T} \\right\\| _ {1}, \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 828, + 482, + 845 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "At the same time, they notice that only using the pixelwise loss function can not effectively generate more high-frequency details [15]. Thus, they accordingly employ a", + "bbox": [ + 89, + 854, + 483, + 901 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "frequency constraint to regularize network training. 
The adopted loss function for the network training is defined as", + "bbox": [ + 511, + 90, + 903, + 121 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\nL = L _ {1} + \\lambda \\| \\mathcal {F} (\\mathbf {I} _ {S R}) - \\mathcal {F} (\\mathbf {I} _ {G T}) \\|. \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 586, + 128, + 903, + 143 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "where $\\mathcal{F}$ represents the Fast Fourier Transform, and $\\lambda$ is a weight parameter which is empirically set to 0.1.", + "bbox": [ + 511, + 152, + 903, + 181 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Spatial-gate modulation module Considering that the reason why the ViT-based model performs well is that SA explores non-local information and expands the effective receptive field of the model. They develop a lightweight spatial-gate modulation (SGM) module to collaboratively extract representative features, where the SAL branch exploits non-local features in a larger receptive field by integrating the dilated depth-wise convolutional layers with horizontal and vertical 1-D kernels, and the LKG branch captures local features in parallel. Moreover, to avoid potential block artifacts aroused by dilation, they adopt the gate mechanism to recalibrate the generated feature maps adaptively, as shown in Fig. 25.", + "bbox": [ + 511, + 181, + 906, + 377 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Given the input feature $\\mathbf{I}_{in} \\in R^{C \\times H \\times W}$ , where $H \\times W$ denotes the spatial size and $C$ is the number of channels. 
Specifically, they first apply a normalization layer and a point-by-point convolution to normalize information and expand the channel.", + "bbox": [ + 511, + 377, + 905, + 453 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} _ {1} = F _ {\\text {C o n v 1} \\times 1} \\left(F _ {\\text {N o r m}} \\left(\\mathbf {I} _ {\\text {i n}}\\right)\\right), \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 460, + 903, + 477 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "where, $F_{Norm}$ represents the $L_2$ normalization and $F_{Conv1\\times 1}$ denotes a $1\\times 1$ convolutional layer, $\\mathbf{I}_1\\in R^{2C\\times H\\times W}$ . Subsequently, the obtained features $\\mathbf{I}_1$ are split into two parts along the channel dimension, this process can be expressed as:", + "bbox": [ + 511, + 483, + 905, + 559 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} _ {x}, \\mathbf {I} _ {y} = F _ {S} \\left(F _ {G} \\left(\\mathbf {I} _ {1}\\right)\\right), \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 633, + 566, + 903, + 583 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "where $F_{G}$ denotes the GELU activation function [38], $F_{S}$ denotes a channel splitting operation, $\\mathbf{I}_x \\in R^{C \\times H \\times W}$ and $\\mathbf{I}_y \\in R^{C \\times H \\times W}$ . They then process the features $\\mathbf{I}_x$ and $\\mathbf{I}_y$ in parallel via the SAL and LKG branches, producing the non-local feature $\\mathbf{I}_n$ and local feature $\\mathbf{I}_l$ , respectively. It is worth mentioning that the SAL and LKG branches only need to be responsible for half the input signals, and the parallel processing is faster. Finally, they fuse the non-local feature $\\mathbf{I}_n$ and local feature $\\mathbf{I}_l$ together with channel concatenation to form a representative output of the SGM module. 
This process can be expressed as,", + "bbox": [ + 511, + 589, + 905, + 755 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} _ {S G M} = F _ {C} \\left(\\mathbf {I} _ {n}, \\mathbf {I} _ {l}\\right), \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 637, + 762, + 903, + 779 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "where, $\\mathbf{I}_{DSG}$ is the output feature and $F_{C}(\\cdot)$ is the channel cascade operation.", + "bbox": [ + 511, + 786, + 903, + 815 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "SA-like branch They exploit non-local features in a larger receptive field by integrating the dilated depth-wise convolutional layers with horizontal and vertical 1-D kernels.", + "bbox": [ + 511, + 816, + 903, + 859 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbf {I} _ {o} = F _ {D ^ {3} W C o n v 5 \\times 1 1} \\left(F _ {D W C o n v 5 \\times 1} \\right. \\tag {18} \\\\ \\left(F _ {D ^ {3} W C o n v 1 \\times 1 1} \\left(F _ {D W C o n v 1 \\times 5} (\\mathbf {I} _ {m})\\right)\\right)) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 573, + 868, + 903, + 902 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 924, + 506, + 935 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "where $F_{DWConv1 \\times 5}(\\cdot)$ denotes the DWConv layer with a kernel of size $1 \\times 5$ , $F_{D^3 WConv1 \\times 11}(\\cdot)$ signifies the DWConv layer with a kernel of size $1 \\times 11$ and the dilated factor is set to 3, $F_{DWConv5 \\times 1}(\\cdot)$ denotes the DWConv layer with a kernel of size $5 \\times 1$ , $F_{D^3 WConv11 \\times 1}(\\cdot)$ signifies the DWConv layer with a kernel of size $11 \\times 1$ and the dilated factor is set to 3. 
Given that increasing the convolution kernel directly will greatly increase the parameter and computation amount, as well as increase the inference time of the model, whereas utilizing the dilated depth-wise convolutional layers with horizontal and vertical 1-D kernels will alleviate the problem. In this way, the information extraction capability of the convolutional layer is further enhanced without greatly increasing the number of computations. Moreover, to avoid potential block artifacts arising from dilation, they adopt the gate mechanism to recalibrate the generated feature maps adaptively. Finally, they use a $1 \\times 1$ convolution to distill the output feature for extracting the representative structure information $\\mathbf{I}_n$ .", + "bbox": [ + 89, + 90, + 483, + 380 + ], + "page_idx": 25 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} _ {n} = F _ {\\text {C o n v 1} \\times 1} \\left(\\mathbf {I} _ {o} * \\mathbf {I} _ {y}\\right) \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 202, + 390, + 482, + 407 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "where $*$ represents the element-wise product operation.", + "bbox": [ + 89, + 419, + 457, + 433 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Local spatial-gate branch Local details are important for the pleasing high-frequency reconstruction. As the SAL branch prioritizes non-local structure information exploration, they develop a simple local spatial-gate branch to capture local features simultaneously. In detail, a $3 \\times 3$ depth-wise convolution is used to encode local information from the input features $\\mathbf{I}_x$ . Then, they use the gate mechanism to generate the enhanced local feature. 
Finally, they use a $1 \\times 1$ convolution with a GELU activation to distill the output features for extracting the representative detail information $\\mathbf{I}_l$ , which is achieved by,", + "bbox": [ + 89, + 434, + 483, + 599 + ], + "page_idx": 25 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbf {I} _ {o} = F _ {D W C o n v 3 \\times 3} (\\mathbf {I} _ {x}) * \\mathbf {I} _ {y}, \\\\ \\mathbf {I} _ {o} = F _ {D W C o n v 3 \\times 3} (\\mathbf {I} _ {x}). \\end{array} \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 611, + 480, + 635 + ], + "page_idx": 25 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} _ {l} = F _ {G} \\left(F _ {\\text {C o n v 1} \\times 1} \\left(\\mathbf {I} _ {o}\\right)\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 630, + 357, + 646 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "where $F_{DWConv3 \\times 3}(\\cdot)$ denotes the DWConv layer with a kernel of size $3 \\times 3$ , $F_{G}$ represents GELU activation function.", + "bbox": [ + 89, + 657, + 482, + 702 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Enhanced self-distillation module They present an enhanced self-distillation (ESD) module to expand and refine the features derived from the SGM in spatial and channel dimensions further. The ESD uses a $3 \\times 3$ depth-wise convolutional to expand spatial and channel information. Then they use the GLUE activation function to introduce nonlinearity and extend the representation of the network. Finally, the output features are fed into a $1 \\times 1$ convolution for further feature mixing and reducing the hidden channel back to the original input dimension. 
Given the input feature $\\mathbf{I}_{in} \\in R^{C \\times H \\times W}$ , this process can be formulated as,", + "bbox": [ + 89, + 704, + 483, + 869 + ], + "page_idx": 25 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} _ {l} = F _ {\\text {C o n v 1} \\times 1} \\left(F _ {G} \\left(F _ {\\text {D W C o n v 3} \\times 3} \\left(\\mathbf {I} _ {i n}\\right)\\right)\\right) \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 147, + 883, + 482, + 902 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Training Details. Following previous works [66], they use the DF2K dataset, which consists of 800 images from DIV2K [4] and 2650 images from Flickr2K [70] as the training dataset. A sliding window slicing operation is used to decompose each HR image into $480 \\times 480$ patches for training. The LR images are obtained by downsampling the HR images using the MATLAB bicubic kernel function.", + "bbox": [ + 511, + 90, + 903, + 196 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "During the training, random rotation and horizontal flipping are used for data augmentation. The proposed SGSDN has 8 SGSDBs, in which the number of feature channels is set to 24. They start by pretraining the model on the DIV2K and Flickr2K datasets. The mini-batch size is set to 64. The model is trained by the ADAN optimizer [124] with $\\beta_{1} = 0.98$ , $\\beta_{2} = 0.92$ and $\\beta_{3} = 0.99$ , and the exponential moving average (EMA) is set to 0.999 to stabilize training. The initial and minimum learning rates are set to $5 \\times 10^{-3}$ and $1 \\times 10^{-6}$ , respectively, and decay according to cosine learning rate. The model is optimized using a combination of the $L_{1}$ loss and an FFT-based frequency loss function [15] for a total of $1 \\times 10^{6}$ iterations. 
The size of the randomly cropped LR patches is $64 \\times 64$ .", + "bbox": [ + 511, + 198, + 905, + 409 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "They then conduct fine-tuning on the DIV2K dataset and the first 10k images from LSDIR [64]. The input size is set to $96 \\times 96$ , with a batch size of 32. The fine-tuning process optimizes the model by starting with an initial learning rate of $3 \\times 10^{-3}$ , while keeping the rest consistent with pretraining. The fine-tuning phase encompasses a total of 100k iterations. They implemented our model on an NVIDIA RTX 3090 GPU using Pytorch.", + "bbox": [ + 511, + 410, + 905, + 531 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "4.24. NanoSR", + "text_level": 1, + "bbox": [ + 513, + 542, + 624, + 556 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Network Architecture. Their network architecture is inspired by SPAN [112] and PAN [142]. While maintaining the overall design of SPAN, they replace the SPAB block with the RepBlock. The RepBlock consists of a feature extractor using reparameterized convolution and a reparameterized pixel attention module. During training, the RepBlock operates in a complex mode to achieve better quality performance but can be equivalently transformed into a simple mode with fewer parameters and FLOPs. The detailed network architecture is illustrated in Fig. 26.", + "bbox": [ + 511, + 566, + 905, + 717 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Reparameterized Convolution. Reparameterized convolution plays a crucial role in improving the performance of efficient CNN-based super-resolution networks. They employ the RepMBCov introduced in PlainUSR [120], and this RepMBCov forms all the convolutions in the RepBlock. In addition, RepMBCov is derived from MobileNetV3 [39] Block (MBConv). The architecture of RepMBCov is depicted in Fig. 
27.", + "bbox": [ + 511, + 718, + 905, + 839 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Implementation Details. They train the model using all 85,791 image pairs from the DIV2K and LSDIR datasets. Each image pair is cropped into $480 \\times 480$ sub-patches for training. During each training batch, 64 HR RGB patches", + "bbox": [ + 511, + 839, + 905, + 901 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/4b7b31467018d189fc76958a39af5df580c215fa4fa8a574934ee8b7d2b699dd.jpg", + "image_caption": [ + "Figure 26. Team NanoSR: The network architecture of RepRLFN" + ], + "image_footnote": [], + "bbox": [ + 143, + 117, + 883, + 429 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/f41c0d497ea15477ece5f1fc75a7c7d46d314cd74ff65e50caa4891ff1ad9ef1.jpg", + "image_caption": [ + "Figure 27. Team NanoSR: The network architecture of RepRLFN" + ], + "image_footnote": [], + "bbox": [ + 165, + 506, + 410, + 738 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "of size $128 \\times 128$ are randomly cropped and augmented with random flipping and rotation. The optimization objective is the $\\ell_1$ loss, and they use the AdamW optimizer ( $\\beta_{1} = 0.9$ , $\\beta_{2} = 0.99$ ) to train NanoSR. The learning rate is initialized at $5 \\times 10^{-4}$ and halved at $\\{250\\mathrm{k}, 400\\mathrm{k}, 450\\mathrm{k}, 475\\mathrm{k}\\}$ iterations within a total of $500\\mathrm{k}$ iterations. The proposed", + "bbox": [ + 89, + 809, + 483, + 902 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "method is implemented using the PyTorch framework on a single NVIDIA RTX 4090 GPU.", + "bbox": [ + 511, + 487, + 906, + 517 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "4.25. 
MegastudyEdu_Vision.AI", + "text_level": 1, + "bbox": [ + 511, + 585, + 756, + 603 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "General Method Description. To effectively model long-range dependency and extensive receptive field, inspired by CFSR [122], they propose the multi-scale aggregation attention network (MAAN), as illustrated in Fig. 28. MAAN reconstructs high-quality images through a shallow feature extractor, a stack of three residual multi-scale aggregation blocks (RMAB) composed of multi-scale aggregation attention layers (MAAL), a large separable kernel attention tail (LSKAT), and an image reconstruction module. Specially, MAAL captures global and local details via a multi-scale mixer and efficient feed-forward network (EFN) [122]. Given a low-resolution input image $I_{LR} \\in \\mathbb{R}^{3 \\times H \\times W}$ , shallow features such as edges, textures, and fine details are extracted using a $3 \\times 3$ convolution in the shallow feature extraction stage and passed to the MAAL. As shown in Fig. 28, the MAAL processing pipeline begins with an input $X$ , applying layer normalization, followed by a $1 \\times 1$ convolution and splitting the feature map into four groups", + "bbox": [ + 511, + 628, + 906, + 902 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/77c96b623860559eacc549d8e973b55a52ac782e82292d36ce71b6afab9761ca.jpg", + "image_caption": [ + "Figure 28. Team MegastudyEdu_Vision.AI: Overview of multi-scale aggregation attention network." 
+ ], + "image_footnote": [], + "bbox": [ + 98, + 88, + 895, + 327 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "along the channel dimension:", + "bbox": [ + 89, + 377, + 287, + 391 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\nV = \\operatorname {C o n v} _ {1 \\times 1} (X),\n$$\n", + "text_format": "latex", + "bbox": [ + 261, + 402, + 392, + 417 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\nF _ {\\text {g a t e}} = \\operatorname {C o n v} _ {1 \\times 1} (X), \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 238, + 421, + 480, + 444 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} F _ {i d}, F _ {g a t e 1}, F _ {g a t e 2}, F _ {g a t e 3} = \\operatorname {S p l i t} (F _ {g a t e}), \\\\ = F _ {: g}, F _ {g: 2 g}, F _ {2 g: 3 g}, F _ {3 g}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 102, + 440, + 444, + 474 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Here, $F_{id}$ is the identity mapping without channel modification. The channel count used in convolution branches, denoted as $g$ , is determined by a ratio $r_g$ , computed as $g = r_g C$ . They set $r_g$ to 0.25. 
Subsequently, each branch is processed using large separable kernel (LSK), inspired by large separable kernel attention (LSKA) [57]:", + "bbox": [ + 89, + 483, + 483, + 575 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\nF _ {i d} ^ {\\prime} = F _ {i d},\n$$\n", + "text_format": "latex", + "bbox": [ + 214, + 585, + 287, + 604 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{c} F _ {g a t e 1} ^ {\\prime} = L S K _ {1 1, 2} \\left(F _ {g a t e 1}\\right), \\\\ \\end{array} \\tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 608, + 480, + 635 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\nF _ {g a t e 2} ^ {\\prime} = L S K _ {2 3, 3} \\left(F _ {g a t e 2}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 630, + 379, + 648 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\nF _ {g a t e 3} ^ {\\prime} = L S K _ {3 5, 3} \\left(F _ {g a t e 3}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 652, + 379, + 672 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "where $LSK_{k,d}$ indicates the kernel size $k$ and dilation factor $d$ . Each LSK is composed of consecutive $1 \\times k$ depth-wise convolution, $k \\times 1$ depth-wise convolution, $1 \\times k$ dilated depth-wise convolution, and $k \\times 1$ dilated depth-wise convolution. The distinct kernel sizes and dilation factors across branches effectively handle multi-scale features.", + "bbox": [ + 89, + 676, + 482, + 766 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "After concatenating the outputs from each branch, the combined result is integrated with $V$ through an element-wise product. 
Subsequently, $1 \\times 1$ convolution is applied to obtain the final output as follows:", + "bbox": [ + 89, + 768, + 483, + 828 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\nF _ {o u t} = \\operatorname {C o n v} _ {1 \\times 1} \\left(V \\odot \\operatorname {C o n c a t} \\left(F _ {i d} ^ {\\prime}, F _ {\\text {g a t e} 1} ^ {\\prime}, F _ {\\text {g a t e} 2} ^ {\\prime}, F _ {\\text {g a t e} 3} ^ {\\prime}\\right)\\right) \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 837, + 483, + 869 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "This $F_{out}$ is then fed into EFN [122]. For further EFN details, refer to CFSR [122].", + "bbox": [ + 89, + 869, + 483, + 901 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "While CFSR [122] employs a $3 \\times 3$ convolution tail for deep feature extraction, it has limitations in establishing long-range connections, restricting the representational capability of reconstructed features. To overcome this, they propose LSKAT inspired by the large kernel attention tail(LKAT) [119], as depicted in Fig. 28.", + "bbox": [ + 511, + 376, + 903, + 467 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Training Details. Their approach leverages DIV2K[103], Flickr2K[70], and the first 10K portion of LSDIR[64]. In each RMAB, the number of channels, RMABs, and MAALs are set to 48, 3, and 2-3-2, respectively. During training, they used 256 HR RGB patches with a batch size of 64. Data augmentation included random flips and rotations. Parameters are optimized using the L1 loss and the Adam optimizer[54]. The learning rate started at $1 \\times 10^{-3}$ and decreasing to $1 \\times 10^{-6}$ using a cosine annealing scheduler. 
The network is trained for 1,000K iterations, implemented in PyTorch, and executed on an NVIDIA RTX 3090 GPU.", + "bbox": [ + 511, + 468, + 906, + 648 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "4.26.MILA", + "text_level": 1, + "bbox": [ + 513, + 664, + 607, + 678 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "General Method Description. As shown in Figure 29, inspired by the efficient approximation of self-attention (EASA) [144], they introduce local variance and design LVSA. Additionally, inspired by MDRN [81] and AGDN [114], they consider the impact of multi-level branches on performance. Therefore, they design a multi-level variance feature modulation block that incorporates non-local information with local variance perception at two different levels. This design aims to better leverage the interplay between local and non-local features while balancing performance and model complexity.", + "bbox": [ + 511, + 686, + 905, + 853 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "The gated-dconv feed-forward network (GDFN) [132] introduces gating mechanism and depth-wise convolutions to encode information from spatially adjacent pixel posi", + "bbox": [ + 511, + 854, + 905, + 901 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 924, + 506, + 936 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "tions, which is highly useful for learning local image structures to achieve effective restoration. However, the single gating structure is relatively simple and cannot effectively capture and blend local contextual information. Therefore, they propose the symmetric gated feed-forward network.", + "bbox": [ + 89, + 90, + 480, + 167 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Training Description. The proposed MVFMNet has 6 FMMs, in which the number of feature channels is set to 26. 
The details of the training steps are as follows:", + "bbox": [ + 89, + 176, + 482, + 222 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Pretraining on the DF2K and the first 1k images of LSDIR datasets. HR patches of size $256 \\times 256$ are randomly cropped from HR images, and the mini-batch size is set to 64. The model is trained by minimizing L1 loss and the frequency loss [14] with Adam optimizer for total 100k iterations. They set the initial learning rate to $1 \\times 10^{-3}$ and the minimum one to $1 \\times 10^{-6}$ , which is updated by the Cosine Annealing scheme [78].", + "2. Finetuning on the DF2K and the first 1k images of LSDIR datasets. HR patch size and mini-batch size are set to $256 \\times 256$ and 64, respectively. The model is fine-tuned by minimizing the L2 loss function. The learning rate is initialized at $2 \\times 10^{-5}$ and gradually decreased to $1 \\times 10^{-8}$ over 500k iterations using the Cosine Annealing scheme." + ], + "bbox": [ + 84, + 231, + 480, + 443 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "4.27. AiMF_SR", + "text_level": 1, + "bbox": [ + 89, + 481, + 210, + 494 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Method Details. They propose a novel Mixture of Efficient Attention (MoEA) architecture for efficient superresolution tasks. The architecture includes a shallow feature extractor, multiple Feature Representation Modules (FRMs), and an efficient reconstruction and upsampling module. Initially, a shallow $3 \\times 3$ convolutional layer reduces computational load, generating compact feature representations. Deep feature extraction employs transformer-inspired blocks with pre-normalization, incorporating Mixture-of-Experts (MoE) Blocks [131] for efficient attention and Depth Feed Forward Networks (DepthFFN) for capturing depth-wise interactions. Details of the architecture can be seen in Fig. 
30.", + "bbox": [ + 89, + 512, + 483, + 709 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "The MoEBlock consists of two parallel feature pathways (Fig. 30). The input features $x$ are first projected into two distinct feature sets $x_{a}$ and $x_{b}$ using a pointwise convolution. The first branch, $x_{a}$ , undergoes both adaptive average and max pooling followed by depth-wise convolutions. The pooling is done in scale of 8 [145]. These pooling layers followed by depth-wise convolutions serve as efficient attention-like mechanism. Then, it combines these features through element-wise addition, nonlinear activation (GELU), and interpolation. The second branch, $x_{b}$ , is processed via depth-wise and pointwise convolutions with GELU activation.", + "bbox": [ + 89, + 719, + 483, + 900 + ], + "page_idx": 28 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} x _ {a} = \\operatorname {D W C o n v} \\left(\\operatorname {A v g P o o l} \\left(x _ {a}\\right)\\right) + \\operatorname {D W C o n v} \\left(\\operatorname {M a x P o o l} \\left(x _ {a}\\right)\\right), \\\\ x _ {a} ^ {\\prime} = \\mathcal {U} (\\mathcal {G} (\\operatorname {P W C o n v} (x _ {a}))), \\\\ x _ {a} ^ {\\prime} = \\operatorname {P W C o n v} \\left(x _ {a} ^ {\\prime}\\right), \\\\ x _ {b} ^ {\\prime} = \\mathcal {G} (\\operatorname {P W C o n v} (\\operatorname {D W C o n v} (x _ {b}))), \\\\ x _ {a b} = \\mathcal {C} \\left(x _ {a} ^ {\\prime}, x _ {b} ^ {\\prime}\\right). \\tag {25} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 127, + 911, + 232 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "where $x_{a}, x_{b}$ are concatenated and passed through the Router (gating network), $\\mathcal{R}$ , which adaptively selects the top- $k$ expert paths based on the channel-wise global average-pooled features in the MoE-layer. 
Each selected expert independently processes $x_{a}'$ and $x_{b}'$ through pointwise convolutions, multiplies them element-wise, and applies a final convolution for feature integration:", + "bbox": [ + 511, + 237, + 903, + 343 + ], + "page_idx": 28 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\operatorname {l o g i t s} = \\mathcal {R} (x _ {a b}), \\\\ x _ {a} ^ {\\prime}, x _ {b} ^ {\\prime} = \\operatorname {T o p K} (\\operatorname {S o f t m a x} (\\log_ {i} i)) \\\\ \\operatorname {E x p e r t} \\left(x _ {a} ^ {\\prime}, x _ {b} ^ {\\prime}\\right) = \\operatorname {P W C o n v} \\left[ \\operatorname {P W C o n v} \\left(x _ {a} ^ {\\prime}\\right) \\times \\operatorname {P W C o n v} \\left(x _ {b} ^ {\\prime}\\right) \\right] \\tag {26} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 516, + 385, + 903, + 450 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Multiple FRMs (LayerNorm-MoEBlock-LayerNorm-DepthFFN sequences) are stacked for deep feature extraction. For reconstruction, global contextual features from deep extraction combine with shallow features via residual connections, followed by PixelShuffle-based upsampling to produce high-resolution outputs. The model uses GELU activation, Layer Normalization. Their MoE layer dynamically routes features across numExperts $= 3$ , selecting the top $k = 1$ experts at training time, allowing a flexible and adaptive processing pipeline tailored specifically to input feature characteristics.", + "bbox": [ + 511, + 457, + 905, + 622 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Training Strategy. The model is trained and tested on BasicSR [115] setting. First, the model is initially trained on DIV2K_LSDIR_x2, then further finetuned with DIV2K_LSDIR_x3 dataset for 500,000 iterations respectively, in which these scales are made with bicubic downsampling. 
The x4 scale model is finetuned on top of the x3 model over 500,000 iterations with the initial learning rate of $1 \\times 10^{-3}$ using the Adam optimizer. The learning rate decayed at iterations [250,000, 400,000, 450,000, 475,000]. The training pipeline included data augmentations such as random horizontal flips, vertical flips and rotations. The model is optimized using L1 Loss and Fast Fourier Transform (FFT) Loss [95] with 1.0 and 0.1 weights, respectively. All reported implementations are carried out using Python (version 3.9) programming language and PyTorch Framework, utilizing one RTX4090, 24GB VRAM and 16-core CPU. Training is conducted over approximately 23 days with a single GPU of batch size of 16.", + "bbox": [ + 511, + 628, + 906, + 900 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 924, + 506, + 936 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/c40b65ab994e2395bc7a92b4cda211ba91920ed236cf7a59b3f243698618b855.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 99, + 89, + 898, + 198 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/01eacf3cccd113c242f920e122e9dd250100f74c8f5e7f603cf5c928a27f10c7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 99, + 200, + 898, + 348 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/a2c01cfe6e304254748277a8f823d85316e6c8e8774d19c15de4428bf13a838f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 99, + 349, + 897, + 436 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/f872827f0bbe062c88b104ed0c499d216e8b302b4a8aa46faf3793bedbc4cf18.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 127, + 441, + 158, + 465 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Adaptive Max Pooling", + "bbox": [ + 163, + 448, + 289, + 460 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": 
"images/50d512d971059ff34fadef4244b80d50193d8309a9a13d88ca8dfe1dfda61946.jpg", + "image_caption": [ + "Figure 29. Team MILA: Network architecture of the proposed MVFMNet." + ], + "image_footnote": [], + "bbox": [ + 326, + 441, + 357, + 465 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Local Variance", + "bbox": [ + 359, + 449, + 447, + 460 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/47a8f5767564f21bec0d29e6ae704dea3d6b3837b42286dffc841e399e9070d8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 441, + 526, + 465 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Channel Concatenate", + "bbox": [ + 532, + 449, + 656, + 460 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/12903d3b3084e11b5f896b87149ea6ab71e21ca200bfcd42111d265b7224d8f0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 694, + 441, + 725, + 465 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Element-wise Addition", + "bbox": [ + 730, + 449, + 856, + 459 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/417ed3a4f6c7a940301d0c59980746b58cddeb2e843610fa81ff47d3e406ebca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 127, + 470, + 158, + 492 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Nearest Up-sampling", + "bbox": [ + 165, + 474, + 284, + 487 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/2d645b7edb13434af06462871050d1113b7f8262449bfe3d9336cd691170f68d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 326, + 470, + 357, + 492 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Chanel Split", + "bbox": [ + 362, + 476, + 431, + 487 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/5c4fffafcd87b05c0eb660d6ac051e73323321b85c6274fe2d1d425f63153e14.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 470, + 526, + 492 + ], + "page_idx": 
29 + }, + { + "type": "text", + "text": "GELU Activation", + "bbox": [ + 532, + 476, + 625, + 486 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/7b7ae85f98846211c1488657fe8c48f7df95a130ea033e4222c2cc126ad7abe3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 694, + 468, + 725, + 492 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Element-wise Product", + "bbox": [ + 730, + 476, + 854, + 486 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "4.28. BVIVSR", + "text_level": 1, + "bbox": [ + 89, + 554, + 205, + 569 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Method Description. Their solution is built on the advances in state-of-the-art single-image super-resolution (SISR) methods [11, 18, 87, 141, 149], particularly the efficient Transformer-based models [52, 139], the continuous super-resolution approaches, such as HiIF [49, 52], and the knowledge distillation strategies [48, 50, 51]. They employ an efficient Transformer-based network architecture, as illustrated in Fig. 31, where the core component is the Hierarchical Encoding Transformer (HiET) layer. The HiET layer was first proposed in [52] and it is specifically designed to capture rich structural dependencies across various regions of the image, enabling the model to handle complex visual patterns effectively. To enhance the capacity of the model for multi-scale feature representations, each HiET layer is set with different window sizes, allowing it to attend to both local and global contexts. Furthermore, the overall architecture incorporates a modified U-Net structure, where skip connections are introduced between symmetric HiET layers at different depths. This design facilitates efficient multi-level feature fusion and ensures better preservation and reconstruction of fine-grained details", + "bbox": [ + 89, + 583, + 483, + 900 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "in the super-resolved outputs. 
In addition, they also apply the multi-teacher knowledge distillation strategy [48] to improve the performance of the lightweight C2D-ISR model, where SRFormer [147], MambaIR [32] and EDSR [70] are employed as teacher networks.", + "bbox": [ + 511, + 555, + 906, + 630 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Training Details. They use the DIV2K [102], 1000 2K images from BVI-AOM [82], Flickr2K [70] and 5000 images from LSDIR[64] as training dataset. For evaluation, they follow common practice and employ the DIV2K validation set (containing 100 images) [102]. The maximum learning rate is set to $4 \\times 10^{-4}$ . The learning rate follows a cosine annealing schedule, gradually decreasing after an initial warm-up phase of 50 epochs. They use L1 loss and the Adam [54] optimization during training. Training and testing are implemented based on 4 NVIDIA 4090 GPUs. The model comprises 154.8K parameters with an input size of $64 \\times 64 \\times 3$ and it was trained for 1000 epochs with 16 batch sizes per GPU. The training of their solution contains five stages:", + "bbox": [ + 511, + 633, + 908, + 845 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "- Training the teacher networks, including SRFormer [147], MambaIR [32] and EDSR [70], by using the original settings in their papers;", + "bbox": [ + 511, + 854, + 908, + 900 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/253833a7e355d218a6f8858267ec14826e3afdeb8733a9e206f03c2e38f8543b.jpg", + "image_caption": [ + "Figure 30. Team AiMF_SR: Main Figure of Proposed Architecture, Mixture of Efficient Attention." + ], + "image_footnote": [], + "bbox": [ + 133, + 126, + 867, + 474 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/6ddd8335873af8c5a39067d510e2bb84a138cae90a3d4c71cc8cbfc3b65e5ffc.jpg", + "image_caption": [ + "Figure 31. 
Team BVIVSR: The structure of the method." + ], + "image_footnote": [], + "bbox": [ + 145, + 507, + 869, + 638 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The teacher aggregation of multi-teacher knowledge distillation (MTKD) strategy [48] was adapted to the above teacher networks to obtain an enhanced teacher network;", + "- Training the lightweight C2D-ISR model [52] on continuous scales i.e, from $\\times 2$ to $\\times 4$ , to learn the correlation between multiple scales and better recover high-frequency details;", + "- The learned C2D-ISR model was distilled by the MTKD strategy [48] with their enhanced teacher network to obtain the enhanced student model;", + "- Finetuning the enhanced student model by increasing the patch size from $64 \\times 64$ to $128 \\times 128$ ." + ], + "bbox": [ + 89, + 676, + 482, + 858 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "4.29.CUIT_HTT", + "text_level": 1, + "bbox": [ + 513, + 676, + 648, + 690 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "General Method Description. The overall architecture of the proposed method is illustrated in Fig. 32(a), which consists of three main components: the shallow feature extraction module, the deep feature extraction module, and the reconstruction and upsampling module. The shallow feature extraction module employs a BSConv [34] module to extract low-level features such as edges and textures from the input image $I^{in} \\in \\mathbb{R}^{3 \\times H \\times W}$ , mapping it to the feature space $f^0 \\in \\mathbb{R}^{C \\times H \\times W}$ for further processing. The extracted shallow features are then fed into the deep feature extraction module, which is composed of multiple Frequency-Segmented Attention Blocks (FSABs) designed in this work. 
The outputs of each FSAB are concatenated", + "bbox": [ + 511, + 704, + 906, + 900 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 924, + 506, + 936 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/f515d21064efcb5f99d823286f26e5e7ffc92eccff0a11ad3be8c813baca6d94.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 96, + 94, + 867, + 214 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/e71224c1294caaddfff3a7868d78eb34cb3399b9a3e70cfae73f0f5093a5bc12.jpg", + "image_caption": [ + "Figure 32. Team CUIT_HT: Schematic Diagram of the Method. (a) Overall Architecture of the Model; (b) Frequency-Segmented Attention Block (FSAB); (c) Schematic of the Enhanced Large-kernel Convolution Block (ELCB); (d) Mechanism of Frequency-Segmented Attention (FSA); (e) Frequency Division and Frequency Recombination." + ], + "image_footnote": [], + "bbox": [ + 94, + 215, + 259, + 444 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/cb4d48375367e6c3831213b678c17fc76e97295cb24d5c23662571f9e2f19896.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 261, + 218, + 450, + 439 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/0bdef5b2be98fa47128c19bf1077c8c10251c4776405671e6a96c0507aeb5481.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 452, + 218, + 867, + 311 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/e8b1e59898d3b68eb6fd482318b1fd061e804edafc865f2b99d918a5991b38f5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 452, + 313, + 867, + 443 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "along the channel dimension and adjusted using a convolutional module group, constituting the deep feature extraction process. As shown in Fig. 
32(b), the FSAB structure includes a Concat operation for channel concatenation and a ConvB module group, which consists of a $1 \\times 1$ convolution, a GELU activation function, and a BSCov stacked sequentially. Finally, the output of the shallow feature extraction module is added element-wise to the output of the deep feature extraction module via a skip connection and passed to the reconstruction and upsampling module. This module upsamples the feature space information $f^{out} \\in \\mathbb{R}^{C \\times H \\times W}$ and maps it to the high-resolution output image $I^{SR} \\in \\mathbb{R}^{3 \\times scale \\times H \\times scale \\times W}$ , where scale is the upscaling factor. In this work, the PixelShuffle method is utilized for upsampling.", + "bbox": [ + 88, + 521, + 480, + 748 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "The Frequency-Segmented Attention Block (FSAB) primarily consists of an information distillation architecture for local feature processing and the proposed Frequency-Segmented Attention (FSA) mechanism for global feature processing. The overall architecture of FSA is illustrated in Fig. 32 (d). The input feature map is first transformed into the frequency domain via the Fast Fourier Transform (FFT), enabling global processing in the spatial domain through frequency domain operations. Inspired by windowed attention, the FDivision operation partitions the frequency spec", + "bbox": [ + 88, + 750, + 482, + 901 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "trum into multiple windows, which are concatenated along the channel dimension. A grouped convolution is then applied to process features in different frequency ranges using distinct weights. Subsequently, the FRecombination operation reassembles the segmented frequency windows back into the spectrum. A convolutional layer is applied, and the result is added element-wise to the original spectrum. 
Finally, the Inverse Fast Fourier Transform (IFFT) is used to convert the processed features back to the spatial domain, and the output is obtained through elementwise multiplication with the original input. As for the information distillation architecture, they adopt the structure of the Residual Feature Distillation Block (RFDB) from RFDN [71], as shown in Fig. 32. (b). However, they replace the convolutional layers with Enhanced Large-kernel Convolution Blocks (ELCB). This module employs large-kernel depthwise convolution on half of the channels and pointwise convolution on the full channels, achieving a large receptive field without significantly increasing the number of parameters. Additionally, structural reparameterization is utilized during training, where multiple branches with different receptive fields are employed. During inference, these branches are equivalently replaced with a single large-kernel convolution module, thereby enhancing the model's learning capability without increasing inference cost.", + "bbox": [ + 511, + 521, + 906, + 900 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Train details. They utilize the DIV2K [4] and Flickr2k [101] dataset and the first 10K images from the LSDIR [64] dataset as the training set for their model. During training, the dataset undergoes random horizontal flipping and $90^{\\circ}$ rotation. The mini-batch size and input patch size are set to 64 and $64 \\times 64$ , respectively. The model is optimized using the L1 loss function and the Adam optimizer, with an initial learning rate of $5 \\times 10^{-3}$ . The learning rate follows a cosine annealing decay schedule over a total of 1000K iterations. Subsequently, the model is fine-tuned using the L2 loss to achieve improved performance. 
Training is conducted using PyTorch 1.12.1 on a Tesla P100 16G GPU.", + "bbox": [ + 89, + 90, + 480, + 287 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "4.30. GXZY.AI", + "text_level": 1, + "bbox": [ + 89, + 300, + 215, + 314 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "General Method Description. The GXZY AI team proposed a Parameter-free Vision Mamba, as shown in Fig. 33. The work is inspired by MambaIR [33], SPAN [112] and DVMSR [59], PFVM consists of three parts, shallow feature extraction, deep feature extraction and reconstruction module. Shallow feature extraction is achieved by $3 \\times 3$ convolution, followed by the use of stacked Residue State Space Blocks (RSSBs), which contain the Vision State Space Module (VSSM) to extract deeper features through the capability of Mamba long-range modeling. Then the shallow and deep features are aggregated by a $3 \\times 3$ convolution along with residual concatenation, and finally upsampling is achieved through a sub-pixel convolutional layer to reconstruct the high resolution image.", + "bbox": [ + 89, + 323, + 482, + 532 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "As shown in Fig. 34, different from the RSSB used in DVMSR, PFVM does not use stacked ViMM modules, but follows the design paradigm of the RSSB in MambaIR, which differs from MambaIR in that 3-residue branching is used in order to maximize the ability of residual learning. In order to obtain better PSNR with approximate inference time, the convolution layer adopts the bottleneck structure, and the channel attention used in MambaIR is replaced by a parameter-free attention.", + "bbox": [ + 89, + 536, + 482, + 671 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Training Strategy. In the training phase, the GXZY AI team uses the LSDIR [64] dataset for training and the DIV2K [3] validation set for validation. 
The images in the training set are first cropped with a step size of 240 and a size of 480 to get a series of cropped images. The model was trained on 2 NVIDIA RTX 3090 GPUs. The details of the training steps are as follows:", + "bbox": [ + 89, + 672, + 482, + 779 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "1. The HR images are randomly cropped to size 192, and the dataset is augmented using random flipping and rotation. The model is trained from scratch with a batch size set to 64, using the Adam optimizer with the learning rate set to 0.0001, $\\beta_{1} = 0.9$ , $\\beta_{2} = 0.99$ , and a Multi-StepLR scheduler with the learning rate halved for every 200,000 iterations for a total of 1,000,000 iterations. The loss function uses L1 loss.", + "bbox": [ + 91, + 780, + 482, + 900 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "2. On the basis of the first step, the model with the optimal PSNR on the DIV2K validation set is loaded as the pre-training model, the size of HR image cropping is adjusted to 256, the learning rate is 0.0002, the learning rate is halved for every 100,000 iterations, and the loss function is still used for 1,000,000 iterations with L1 loss.", + "bbox": [ + 513, + 90, + 903, + 196 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "4.31. IPCV", + "text_level": 1, + "bbox": [ + 511, + 208, + 602, + 223 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "This team uses HiT-SR: Hierarchical Transformer for Efficient Image Super-Resolution [140] for this challenge. The Hierarchical Transformer for Efficient Image Super-Resolution (HiT-SR) is a deep learning model designed to upscale low-resolution (LR) images into high-resolution (HR) outputs while maintaining efficiency and high-quality reconstruction. 
Unlike traditional convolutional neural networks (CNNs), which struggle to capture long-range dependencies, HiT-SR employs a hierarchical self-attention mechanism that efficiently processes multiscale image features. This allows the model to integrate local and global information, improving image detail reconstruction while reducing computational costs.", + "bbox": [ + 511, + 229, + 903, + 426 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "At the core of the network is a hierarchical feature learning process, where image features are extracted and refined progressively through multiple stages. Instead of applying full-resolution self-attention, which is memory intensive, HiT-SR reduces token complexity using patch merging and downsampling modules, allowing efficient computation without loss of essential information. The model further refines these hierarchical features through multiscale self-attention mechanisms, ensuring that fine-grained details and global structures are effectively captured.", + "bbox": [ + 511, + 428, + 905, + 578 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "For the final super-resolution reconstruction, HiT-SR aggregates and progressively upsamples the processed features. This multistage refinement approach ensures that high-frequency details are preserved while preventing artifacts common in naive upsampling techniques. The resulting HR image maintains sharp edges, realistic textures, and minimal distortions. They have used available pre-trained model weights [134] on the low resolution images of the test data set and predicted high resolution images.", + "bbox": [ + 511, + 579, + 905, + 715 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "4.32. X-L", + "text_level": 1, + "bbox": [ + 511, + 726, + 589, + 741 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "General Method Description. Their proposed partial permuted self-attention network (PPSA-Net) is shown in Fig. 35. 
PPSA-Net is inspired by two works: SRFormer [147] and PartialConv [9].
This design allows us to efficiently reduce computational overhead while maintaining the model's ability to capture both local and global information, leading to high-quality SR results.", + "bbox": [ + 89, + 676, + 483, + 858 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Training details. They follow the same training procedure as SRFormer [147]. However, they conduct their training", + "bbox": [ + 89, + 869, + 483, + 902 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "using a single NVIDIA 4090 GPU.", + "bbox": [ + 513, + 676, + 746, + 691 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "4.33.Quantum_Res", + "text_level": 1, + "bbox": [ + 511, + 709, + 668, + 724 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Method Details. In this work, they propose a novel student-teacher framework for super-resolution, as shown in Fig. 36 that enables a lightweight student model to achieve better performance comparable to heavier models. Specifically, to adopt this architecture, they used MambaIRv2-Light [32] as the student model, while MambaIRv2-base [32] serves as the teacher. While they use MambaIRv2-light as an efficiency, their key contribution is demonstrating that a guided student-teacher learning strategy can significantly improve SR performance while keeping model complexity low. [108]", + "bbox": [ + 511, + 734, + 906, + 900 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/496abd7bd595907443e0670187e23f1f4d56c40d11a3074d787694ea1ce40318.jpg", + "image_caption": [ + "Figure 35. Team X-L: Overview of the proposed PPSA-Net." + ], + "image_footnote": [], + "bbox": [ + 169, + 88, + 831, + 229 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/5f12b3c080f9cb9562c8a1755391e2e1c37748366d569130074f4dda3ff55992.jpg", + "image_caption": [ + "Figure 36. 
Team Quantum_Res: The overall pipeline of efficient super-resolution approach, which employs a student-teacher training paradigm. The high-capacity Teacher Network (MambaIRv2-B) learning is transferred to the lightweight Student Network (MambaIRv2-Light) using knowledge distillation. The student network is optimized using L1 loss to ensure accurate superresolution while maintaining efficiency. The input low-resolution (LR) database serves as the training input, guiding the student model to achieve high-fidelity reconstruction with reduced computational complexity." + ], + "image_footnote": [], + "bbox": [ + 93, + 277, + 483, + 393 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "The student model extracts the initial low-level features from the input low-resolution image using the $3 \\times 3$ convolutional layer. The core of the network comprises a series of Attentive State-Space Blocks (ASSBs) [32] to capture long-range dependencies efficiently. For each block, residual connections are used to facilitate stable gradient propagation. Finally, a pixel-shuffle-based upsampling module reconstructs the final high-resolution image. [32]", + "bbox": [ + 89, + 560, + 482, + 681 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "The teacher model, MambaIRv2, follows the same architectural design but with increased depth and wider feature dimensions. This model has significantly more parameters and serves as an upper-bound reference for the student.", + "bbox": [ + 89, + 683, + 482, + 744 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Teacher-Guided Inference. The teacher model remains frozen throughout training and is only used as a qualitative reference to validate architectural choices and improvements. The student model inherits refined architectural principles from the teacher rather than weight transfer or feature alignment. 
This allows the student to retain its original lightweight nature while benefiting from structural knowledge obtained from a larger-capacity model [108].", + "bbox": [ + 89, + 746, + 482, + 867 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Inference Strategy. During inference, an efficient patch-based processing method is applied to handle high-", + "bbox": [ + 89, + 869, + 483, + 901 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "resolution images. Given an input image, it is divided into overlapping patches. Each patch is processed independently by the student network, and final predictions are blended using a weighted averaging scheme to ensure seamless reconstruction. [32]", + "bbox": [ + 511, + 281, + 903, + 356 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Training Details. The student model is initialized using pre-trained weights of MambaIRv2-light. The teacher model is loaded with pre-trained weights from a high-performing MambaIRv2-base variant. Fine-tuning was performed on DIV2K and LSDIR, with the number of feature channels set to 48. The training was conducted on patches of size $192 \\times 192$ extracted from high-resolution images, using a batch size of 8. The model is finetuned by minimizing the L1 loss function using the Adam optimizer. The initial learning rate is set to $1 \\times 10^{-5}$ and is reduced when training iterations reach specific milestones, following a Multi-StepLR decay strategy with a factor of 0.5. The total number of iterations is 150K. The teacher model is only used as a reference for guiding architectural refinement and remains frozen throughout the training.", + "bbox": [ + 511, + 357, + 906, + 583 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "4.34. SylabSR", + "text_level": 1, + "bbox": [ + 513, + 590, + 627, + 606 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Method. 
Inspired by RLFN [56] and VARSR [88], they propose an AutoRegressive Residual Local Feature Network (AR-RLFN) to implement a two-stage super-resolution framework. Specifically, they build a lightweight version of RLFN targeting $2 \\times$ super-resolution, meaning that the final $4 \\times$ SR image is generated from an intermediate $2 \\times$ SR image produced by the same model. The overall framework of AR-RLFN is shown in Fig. 37. Although the model needs to be run twice, the $2 \\times$ SR task requires significantly fewer parameters and FLOPs compared to the original one, making the approach efficient overall.", + "bbox": [ + 511, + 613, + 905, + 779 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "The modified structure of RLFN is further inspired by R2Net [91]. Benefiting from the two-stage strategy, their model is able to operate with fewer parameters. In their framework, they adopt three Residual Local Feature Blocks (RLFBs) with a reduced number of channels compared to the original version. Additionally, they replace ReLU with LeakyReLU to mitigate gradient vanishing. For reparameterization, they employ the Residual-in-Residual Rep Block", + "bbox": [ + 511, + 779, + 906, + 900 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/17821671883df0836f9272ab356f13d8f4e20543a0f3382103eb12be5c27e5b0.jpg", + "image_caption": [ + "Figure 37. Team SylabSR: The structure of (up) AR-RLFN, (a) RLFB, (b) RRRB and (c) its reparameterization." + ], + "image_footnote": [], + "bbox": [ + 96, + 90, + 480, + 353 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "(RRRB) [26] for improved compression, which reduces the number of parameters during inference by approximately $45\\%$ .", + "bbox": [ + 89, + 425, + 482, + 469 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Training Strategy. 
They train their network on DIV2K [104] and LSDIR [64] datasets, and augment the training data using random flipping and rotation. The training process is divided into three stages:", + "bbox": [ + 89, + 470, + 483, + 532 + ], + "page_idx": 35 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. HR patches of size $512 \\times 512$ are randomly cropped from the ground truth DIV2K images. In this stage, the model performs $2 \\times$ super-resolution. The number of channels in the RRRB is set to 12, and the batch size is set to 32. They use the Adam optimizer to minimize the Charbonnier loss, with the learning rate set to $5\\mathrm{e}^{-4}$ . The training runs for 100k iterations, and the learning rate is halved every 20k iterations.", + "2. HR patches of size $256 \\times 256$ are randomly cropped from the ground truth DIV2K images. The model again performs $2 \\times$ super-resolution in this stage. The remaining configurations are the same as in Stage 1.", + "3. HR patches of size $512 \\times 512$ are randomly cropped from both the DIV2K and LSDIR datasets. In this stage, they use the Adam optimizer to minimize MSE loss, with the learning rate set to $2\\mathrm{e}^{-4}$ . The training runs for 50k iterations, and the learning rate is halved every 10k iterations." + ], + "bbox": [ + 91, + 532, + 483, + 789 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "4.35. NJUPCA", + "text_level": 1, + "bbox": [ + 89, + 801, + 207, + 816 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "General Method Description. Inspired by SPAN [112], they propose the Spatial Frequency Network (SFNet), which fully leverages both spatial and frequency domain representations. 
SFNet integrates Frequency Knowledge Miner (FKM) modules after each Spatial Attention Block", + "bbox": [ + 89, + 824, + 483, + 900 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/b1af22d432546be3d58f726de7b0d76a5692472560a730d2902ecb22dbc465ac.jpg", + "image_caption": [ + "Figure 38. Team NJUPCA: The detailed architecture of the designed FKM." + ], + "image_footnote": [], + "bbox": [ + 524, + 90, + 895, + 220 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "(SPAB) to capture frequency domain features, complementing the spatial features extracted by SPAB. This parallel design enables the network to effectively learn and combine spatial and frequency domain representations, enhancing the performance of super-resolution reconstruction.", + "bbox": [ + 511, + 275, + 903, + 349 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "As illustrated in Fig. 38, the frequency knowledge miner (FKM) is designed to learn frequency representation from input, which comprises two core components: multi-band frequency learner (MBFL) and full-frequency adjustment learner (FFAL). MBFL aims to enhancing frequency representation by focusing on distinct frequency bands, while FFAL adjusts frequency-domain features from a full-frequency perspective.", + "bbox": [ + 511, + 351, + 905, + 470 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Training Details. They employ two-stage training paradigm:", + "bbox": [ + 511, + 470, + 903, + 502 + ], + "page_idx": 35 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- **Stage I - Foundation Training:** Randomly initialized weights are trained on DIV2K and full LSDIR datasets using $128 \\times 128$ HR patches. 
Configuration: Adam optimizer ( $\\beta_{1} = 0.9$ , $\\beta_{2} = 0.999$ ) with L1 loss, initial learning rate $5 \\times 10^{-4}$ (halved every 200 epochs), batch size 64 over 1,000 epochs (34 hours on $4 \\times$ NVIDIA A6000).", + "- Stage II - Refinement: Initialized with Stage I weights, fine-tuned using DIV2K and LSDIR subset. Configuration: L2 loss with cosine learning schedule ( $\\eta_{\\mathrm{initial}} = 1 \\times 10^{-4}$ ), 500 epochs." + ], + "bbox": [ + 513, + 503, + 903, + 655 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Other details: Training employed standard data augmentation (random rotation and flipping) without additional regularization techniques.", + "bbox": [ + 511, + 657, + 903, + 702 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "4.36. DepthIBN", + "text_level": 1, + "bbox": [ + 511, + 712, + 637, + 728 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Single Image Super-Resolution (SISR) still faces challenges such as a large number of parameters, high memory consumption, and slow training and inference speed, despite significant advancements. These issues limit the practical use of SISR methods in real-world scenarios. Therefore, recent research has focused on developing lightweight models and optimizing network architectures. Among these techniques, Information Distillation is used to extract important features by splitting channels [43, 45, 67, 71]. One of the main challenges of CNNs is the high computational cost of convolution operations. To reduce this cost,", + "bbox": [ + 511, + 734, + 906, + 900 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/2c3df1452537de9ebf29c6ff074f05d09538ce0acd4db76b079e71e1940d1cb2.jpg", + "image_caption": [ + "Figure 39. Team DepthIBN: Involution and BSConv Multi-Depth Distillation Block (IBMDB)." 
+ ], + "image_footnote": [], + "bbox": [ + 93, + 88, + 472, + 281 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "the Depthwise Separable Convolution (DSConv) [40, 135] method was introduced, but due to the separate processing of channels, some information may be lost. To address this issue, BSCov optimizes feature processing by utilizing kernel correlations, improving performance and reducing computations [34]. Furthermore, shown in Fig. 39, Involution replaces fixed filters with pixel-dependent dynamic filters, making it more sensitive to spatial variations and better at capturing long-range dependencies between pixels [60]. Involution not only reduces parameters and resource consumption but also provides better performance compared to convolution-based models due to its superior feature extraction capability.", + "bbox": [ + 88, + 354, + 482, + 549 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Method. They used the IBMDN model in this challenge, following previous studies in the field of Lightweight Image Super-Resolution [6]. They propose an Involution and BSConv Multi-Depth Distillation Network (IBMDN), consisting of 6 Involution and BSConv Multi-Depth Distillation Blocks (IBMDB). IBMDB integrates Involution and BSConv to balance computational efficiency and feature extraction. The overall architecture of their proposed model consists of four main sections: shallow feature extraction, deep feature extraction, feature fusion, and reconstruction. A $3 \\times 3$ convolution is used to extract shallow features. Then, through 6 IBMDB blocks, deep features are extracted and fused using a $1 \\times 1$ convolution, followed by refinement through a $3 \\times 3$ convolution. 
The pixel-shuffle operation is then used as the reconstruction module.", + "bbox": [ + 89, + 551, + 482, + 777 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "The Involution and BSConv Multi-Depth Distillation Block (IBMDB) consists of three shallow residual blocks (SRB_IBMD) and one channel contrast attention (CCA) block. Based on previous experiments, the use of $3 \\times 3$ convolutions, due to computational complexity and a large number of parameters, is not always the best option, especially for lightweight super-resolution models [5]. In SISR models, a fixed structure for feature extraction blocks is", + "bbox": [ + 89, + 779, + 482, + 900 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "usually used, while features extracted at different depths of the network may differ. This approach may prevent the model from fully exploiting its capacity. Designing blocks with varying structures tailored to the depth of the network can enhance model performance. In their proposed model, the block structure is adjusted based on network depth to achieve an optimal feature extraction combination at different levels.", + "bbox": [ + 511, + 90, + 903, + 210 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "BSCnv reduces parameters using intra-kernel correlation, better preserves information, and improves model accuracy without increasing complexity. Involution, with fewer learning parameters, extracts visual features through its attention mechanism and increases efficiency. Therefore, in the Information distillation structure, they consider the block structure differently. At the beginning of the network, BSCnv is dominant in maintaining pixel correlation and local interactions within the block, and with increasing depth, Involution becomes the dominant operator. If BSCnv is denoted by B and Involution by I, the optimal block combination in the deep feature extraction section is as follows: BBB-BBB-BIB-BIB-IBI-IBI. 
The details of the blocks are shown in the Fig. 39.", + "bbox": [ + 511, + 212, + 906, + 424 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "4.37. Cidaut AI", + "text_level": 1, + "bbox": [ + 511, + 436, + 635, + 450 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "They propose a lightweight yet effective network with three blocks: an initial Sobel-based block and two ESA-based edge refinement blocks, regulated by a global residual connection. Upscaling is performed via pixel shuffle for efficient super-resolution.", + "bbox": [ + 511, + 459, + 905, + 534 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/0efe7737ced44ffc023622d39d82998bc7cf70f73b427d5f237ad9c301089f96.jpg", + "image_caption": [ + "Figure 40. Team Cidaut AI: Fused Edge Attention Network (FEAN) structure. They also show the Sobel Fused Residual Block (SFRB) and the Inverted Residual Bottlenecks (IRB) [86]." + ], + "image_footnote": [], + "bbox": [ + 519, + 553, + 901, + 750 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "As shown in Fig. 40, the design integrates two MobileNet Inverted Bottlenecks [86] with channel shuffle and SiLU activation for enhanced information mixing. Inspired by EFDN [117], Sobel-based attention extracts edge features, refined using partial convolutions [84] with minimal", + "bbox": [ + 511, + 825, + 905, + 900 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 488, + 924, + 506, + 935 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/e80fade4787f96534f1e2ef24e32d09dadef36238ccf578c2e38675c21631355.jpg", + "image_caption": [ + "Figure 41. Team Cidaut AI: Structure of the Enhanced ESA Block (EEB)." + ], + "image_footnote": [], + "bbox": [ + 101, + 90, + 475, + 232 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "parameter increase. 
The final attention map, a weighted sum of refined $Gx$ , $Gy$ , and $GxGy$ , undergoes further refinement via partial convolution. A final $1 \\times 1$ convolution preserves details while preventing excessive edge processing.", + "bbox": [ + 89, + 304, + 483, + 380 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "The proposed ERIB block, an efficient convolutional unit with self-activation, starts with depthwise convolution and $1 \\times 1$ feature expansion [86]. Partial convolutions [84] refine features, while channel shuffle enhances mixing. Inspired by Simple Gate [10], they introduce nonlinearity by reducing channels without increasing parameters. A weighted residual connection with partial convolution ensures effective information propagation, maintaining competitive performance despite PyTorch inefficiencies.", + "bbox": [ + 89, + 380, + 483, + 516 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "For the EEB in Fig. 41, they draw inspiration from the ReNRB block [91], replacing reparameterized convolutions with ERIB for improved efficiency. Partial convolutions in the ESA bottleneck and residual connections further exploit feature map redundancy.", + "bbox": [ + 89, + 517, + 483, + 593 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Training Strategy. The training was carried out using the DIV2K, FLICK2R, and LSIDR (30%) datasets to improve the model's generalization ability. As a baseline, the model was trained for 1000 epochs with a cosine annealing learning rate scheduler, a crop size of $512 \\times 512$ , and a batch size of 16. Due to instability in the loss during training, an optimal learning rate analysis was performed whenever the loss diverged. 
This led to the implementation of a learning rate sweep strategy, which was organized into 5 stages.", + "bbox": [ + 89, + 593, + 483, + 729 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "4.38.IVL", + "text_level": 1, + "bbox": [ + 89, + 739, + 169, + 755 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Method. Their approach builds upon the strategy used in SPAN [108], last year's winning method, to extract attention maps and integrates it into the proposed baseline architecture, EFDN [116], aiming to enhance feature extraction and structural representation in image processing tasks.", + "bbox": [ + 89, + 763, + 482, + 839 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Specifically, as illustrated in Figure 42, this strategy is incorporated within the EDBB blocks of EFDN, which are designed to capture fundamental structural features of an image by applying Sobel and Laplacian filters. These fil", + "bbox": [ + 89, + 839, + 483, + 900 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "ters emphasize edge and texture information, contributing to improved representation learning. During the inference phase, the EDBB blocks are reparametrized into 3x3 convolutions to maintain computational efficiency while preserving learned feature representations.", + "bbox": [ + 511, + 90, + 905, + 167 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "The attention maps are derived following the approach implemented in SPAN, leveraging an activation function that is both odd and symmetric to effectively highlight essential regions of the image. 
These attention maps serve as a direct substitute for the ESA block present in the original EFDN model, aiming to refine feature selection and enhance the model's overall performance.", + "bbox": [ + 511, + 179, + 905, + 285 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "As a result of the applied modifications, the final architecture has a lower parameter count and requires fewer floating-point operations compared to the proposed baseline method, EFDN.", + "bbox": [ + 511, + 297, + 905, + 358 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Training Details. The training process is structured into three progressive phases to optimize performance and stability:", + "bbox": [ + 511, + 371, + 905, + 417 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Pre-training: The model undergoes an initial training phase using the DIV2K dataset, incorporating data augmentation techniques such as random rotations, horizontal flipping, and random cropping to generate patches of size $64 \\times 64$ . Training is conducted over 30,000 iterations with a batch size of 32, utilizing the Adam optimizer $(\\beta_{1} = 0.9, \\beta_{2} = 0.999)$ . The learning rate is initially set to 1e-3 for the first 20,000 iterations and subsequently reduced to 1e-4 for the remaining 10,000 iterations. L1 loss is used throughout this phase.", + "- First training stage: The model is further refined using the DIV2K_LSDIR dataset, while maintaining the same augmentation strategies as in the pre-training phase. The patch size is increased to $256 \\times 256$ , and training is extended to 100,000 iterations with a batch size of 64. The Adam optimizer $(\\beta_{1} = 0.9, \\beta_{2} = 0.999)$ is employed, starting with a learning rate of 5e-4, which undergoes a decay by a factor of 0.5 every 20,000 iterations. 
L1 loss remains the chosen loss function for this stage.", + "- Second training stage: In the final phase, training continues on the DIV2K_LSDIR dataset with an expanded patch size of $512 \\times 512$ for an additional 40,000 iterations. The same augmentation methods are retained, and most hyperparameters remain unchanged. However, to ensure stable convergence and fine-tune performance, the learning rate is reduced to 5e-5. During this stage, L1 loss is applied for the first 10,000 iterations, after which L2 loss is utilized to enhance final model performance." + ], + "bbox": [ + 513, + 429, + 906, + 851 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "All the training phases were performed of the model a single NVIDIA RTX 4070 Super GPU and required approximately 20 hours.", + "bbox": [ + 526, + 852, + 905, + 898 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "38", + "bbox": [ + 488, + 924, + 506, + 935 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/a34fd9595397439c984a401aa9617a0634a85b8e638fa7ea12403f01e0a3c2f6.jpg", + "image_caption": [ + "Figure 42. Team IVL: Schematic diagram of the method." + ], + "image_footnote": [], + "bbox": [ + 89, + 88, + 911, + 315 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 91, + 363, + 250, + 380 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "This work was partially supported by the Humboldt Foundation, the Ministry of Education and Science of Bulgaria (support for INSAIT, part of the Bulgarian National Roadmap for Research Infrastructure). We thank the NTIRE 2025 sponsors: ByteDance, Meituan, Kuaishou, and University of Wurzburg (Computer Vision Lab).", + "bbox": [ + 89, + 388, + 483, + 479 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "A. 
Teams and Affiliations", + "text_level": 1, + "bbox": [ + 89, + 491, + 307, + 507 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "NTIRE 2025 ESR Teams", + "text_level": 1, + "bbox": [ + 89, + 515, + 285, + 530 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Title: NTIRE 2025 Efficient Super-Resolution Challenge", + "bbox": [ + 89, + 537, + 473, + 553 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 89, + 554, + 163, + 566 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Bin Ren $^{1,2,4}$ (bin. ren@unitn.it),", + "bbox": [ + 89, + 566, + 303, + 582 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Hang Guo $^{3}$ (cshguo@gmail.com),", + "bbox": [ + 89, + 583, + 318, + 598 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Lei Sun4 (lei.sun@insait.ai)", + "bbox": [ + 89, + 598, + 277, + 613 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Zongwei Wu5 (zongwei.wu@uni-wuerzburg.de),", + "bbox": [ + 89, + 613, + 415, + 628 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Radu Timofte $^{5}$ (radu.timofte@vision.ee.ethz.ch)", + "bbox": [ + 89, + 628, + 411, + 643 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Yawei $\\mathrm{Li^{6}}$ (li.yawei.ai@gmail.com),", + "bbox": [ + 89, + 643, + 333, + 659 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 89, + 659, + 174, + 672 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 University of Pisa, Italy", + "$^{2}$ University of Trento, Italy", + "3 Tsinghua University, China", + "4 INSÄIT, Sofia University,\"St. 
Kliment Ohridski\", Bulgaria", + "5 Computer Vision Lab, University of Würzburg, Germany", + "$^{6}$ ETH Zürich, Switzerland" + ], + "bbox": [ + 91, + 674, + 480, + 762 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "EMSR", + "text_level": 1, + "bbox": [ + 91, + 787, + 147, + 801 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Title: Distillation-Supervised Convolutional Low-Rank", + "bbox": [ + 89, + 809, + 482, + 824 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Adaptation for Efficient Image Super-Resolution", + "bbox": [ + 89, + 824, + 413, + 840 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 840, + 163, + 853 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Yao Zhang $^{1}$ (yao_zhang@sjtu.edu.cn),", + "bbox": [ + 89, + 854, + 349, + 869 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Xinning Chai1 (chaixinning@sjtu.edu.cn),", + "bbox": [ + 89, + 869, + 375, + 885 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Zhengxue Cheng1 (zxcheng@sjtu.edu.cn),", + "bbox": [ + 89, + 885, + 377, + 900 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Yingsheng Qin $^{2}$ (yingsheng.qin@transsion.com),", + "bbox": [ + 513, + 364, + 844, + 378 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Yucai Yang $^{2}$ (yucai.yang@transsion.com),", + "bbox": [ + 514, + 378, + 800, + 393 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Li Song $^{1}$ (song_li@sjtu.edu.cn),", + "bbox": [ + 514, + 393, + 730, + 410 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 410, + 598, + 422 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ Shanghai Jiao Tong University", + "2 Transsion in China" + ], + "bbox": [ + 514, + 424, + 728, + 453 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "XiaomiMM", + "text_level": 1, + "bbox": [ + 514, + 481, + 
609, + 494 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Title: SPANF", + "bbox": [ + 514, + 503, + 614, + 517 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 518, + 584, + 532 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Hongyuan $\\mathrm{Yu}^1$ (yuhyuan1995@gmail.com),", + "bbox": [ + 514, + 532, + 807, + 549 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Pufan $\\mathrm{Xu}^2$ (xpf22@mails.tsinghua.edu.cn),", + "bbox": [ + 514, + 549, + 800, + 564 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Cheng Wan3 (jouiney666@gmail.com),", + "bbox": [ + 514, + 564, + 777, + 579 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Zhijuan Huang1 (huangzhijuan@xiaomi.com),", + "bbox": [ + 514, + 579, + 821, + 594 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Peng Guo $^{4}$ (guopeng0100@163.com),", + "bbox": [ + 514, + 594, + 769, + 609 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Shuyuan Cui5 (jouiney666@gmail.com),", + "bbox": [ + 514, + 609, + 785, + 625 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Chenjun Li $^{3}$ (cl2733@cornell.edu),", + "bbox": [ + 514, + 625, + 751, + 638 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Xuehai Hu (hsquare@mail.ustc.edu.cn),", + "bbox": [ + 514, + 638, + 779, + 655 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Pan Pan1 (panpan@xiaomi.com),", + "bbox": [ + 514, + 655, + 736, + 670 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Xin Zhang $^{1}$ (zhangxin14@xiaomi.com),", + "bbox": [ + 514, + 670, + 782, + 685 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Heng Zhang $^{1}$ (zhangheng8@xiaomi.com),", + "bbox": [ + 514, + 685, + 795, + 700 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 715, + 596, + 729 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "text", + "list_items": 
[ + "1 Multimedia Department, Xiaomi Inc.", + "$^{2}$ School of Integrated Circuits, Tsinghua University", + "3 Cornell University", + "4 Hanhai Information Technology (Shanghai) Co., Ltd.", + "5 Huatai Insurance Group Co., Ltd." + ], + "bbox": [ + 514, + 729, + 875, + 806 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "ShannonLab", + "text_level": 1, + "bbox": [ + 514, + 832, + 617, + 845 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Title: Reparameterization Network for Efficient Image", + "bbox": [ + 513, + 854, + 903, + 869 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Super-Resolution", + "bbox": [ + 514, + 869, + 630, + 883 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 885, + 584, + 898 + ], + "page_idx": 38 + }, + { + "type": "page_number", + "text": "39", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Qing Luo $^{1}$ (luoqing.94@qq.com), Linyan Jiang $^{1}$ , Haibo Lei $^{1}$ , Qifang Gao $^{1}$ , Yaqing Li $^{1}$ ,", + "bbox": [ + 89, + 90, + 318, + 166 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Affiliations: \n1Tencent", + "bbox": [ + 89, + 181, + 174, + 210 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "TSSR", + "text_level": 1, + "bbox": [ + 89, + 237, + 140, + 251 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Title: Light Network for Efficient Image Super-Resolution \nMembers: \nWeihua Luo1 (185471613@qq.com), \nTsing Li1,", + "bbox": [ + 89, + 258, + 482, + 320 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Affiliations: \n1 Independent researcher", + "bbox": [ + 89, + 335, + 259, + 364 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "mbga", + "text_level": 1, + "bbox": [ + 89, + 391, + 138, + 406 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Title: Expanded SPAN for Efficient Super-Resolution Members: \nQing Wang $^{1}$ 
(wangqing.Keen@bytedance.com), \nYi Liu $^{1}$ , \nYang Wang $^{1}$ , \nHongyu An $^{1}$ , \nLiou Zhang $^{1}$ , \nShijie Zhao $^{1}$ ,", + "bbox": [ + 89, + 412, + 450, + 534 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Affiliations: \n1 ByteDance", + "bbox": [ + 89, + 549, + 179, + 579 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "VPEG_C", + "text_level": 1, + "bbox": [ + 89, + 604, + 165, + 618 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Title: DAN: Dual Attention Network for lightweight Image Super-Resolution \nMembers: \nLianhong Song1 (songlianhong@njust.edu.cn), \nLong Sun1, \nJinshan Pan1, \nJiangxin Dong1, \nJinhui Tang1", + "bbox": [ + 89, + 626, + 482, + 747 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Affiliations: \n1Nanjing University of Science and Technology", + "bbox": [ + 89, + 763, + 413, + 792 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "XUPTBoys", + "text_level": 1, + "bbox": [ + 89, + 816, + 181, + 833 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Title: Frequency-Guided Multi-level Dispersion Network for Efficient Image Super-Resolution \nMembers: Jing Wei1 (freedomwj@126.com),", + "bbox": [ + 89, + 839, + 482, + 901 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Mengyang Wang1, Ruilong Guo1, Qian Wang1,2, Affiliations:", + "bbox": [ + 513, + 90, + 640, + 148 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "$^{1}$ Xi'an University of Posts and Telecommunications $^{2}$ National Engineering Laboratory for Cyber Event Warning and Control Technologies", + "bbox": [ + 514, + 151, + 903, + 196 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "HannahSR", + "text_level": 1, + "bbox": [ + 514, + 218, + 602, + 233 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Title: Multi-level Refinement and Bias-learnable Attention Dual Branch Network for Efficient Image Super-Resolution Members: Qingliang Liu $^{1}$ 
(liuqingliang1@honor.com), Yang Cheng $^{2}$ (obliviate73@outlook.com) Affiliations: \n $^{1}$ Beijing Honor Device Co., Ltd. \n $^{2}$ State Key Laboratory of Integrated Chip & System, Fudan University", + "bbox": [ + 513, + 239, + 903, + 377 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Davinci", + "text_level": 1, + "bbox": [ + 514, + 398, + 576, + 412 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Title: PlayerAug \nMembers: \nDavinci (1016994139@qq.com), \nEnxuan Gu1(guexstan@163.com), \nAffiliations: \n1 Dalian University of Technology", + "bbox": [ + 514, + 420, + 743, + 512 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "SRCB", + "text_level": 1, + "bbox": [ + 514, + 532, + 566, + 547 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Title: SPAN with pruning. \nMembers: \nDafeng Zhang1 (dfeng.zhang@samsung.com), Yang Yong1, \nAffiliations: \n1 Samsung Research China - Beijing (SRC-B)", + "bbox": [ + 514, + 556, + 821, + 647 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Rochester", + "text_level": 1, + "bbox": [ + 514, + 667, + 594, + 681 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Title: ESRNet: An enhanced version of SPAN for Efficient Super-Resolution \nMembers: \nPinxin Liu1 (pliu23@ur.rochester.edu), \nYongsheng Yu1 (yyu90@ur.rochester.edu), \nHang Hua1 (hhua2@cs.rochester.edu), \nYunlong Tang1 (yunlong.tang@rochester.edu), \nAffiliations: \n1 University of Rochester", + "bbox": [ + 513, + 690, + 903, + 825 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "IESR", + "text_level": 1, + "bbox": [ + 514, + 848, + 560, + 862 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Title: Inference Efficient Super-Rosolution Net Members:", + "bbox": [ + 513, + 869, + 831, + 898 + ], + "page_idx": 39 + }, + { + "type": "page_number", + "text": "40", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Shihao Wang1 
(shihao.wsh@antgroup.com), Yukun Yang1, Zhiyu Zhang1, Affiliations: \n1 Ant Group", + "bbox": [ + 89, + 90, + 387, + 166 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "ASR", + "text_level": 1, + "bbox": [ + 91, + 196, + 132, + 210 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Title: ASR", + "bbox": [ + 91, + 220, + 173, + 234 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 237, + 161, + 250 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Yukun Yang $^{1}$ (yukun.yyk@antgroup.com),", + "bbox": [ + 91, + 251, + 375, + 267 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 268, + 174, + 281 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "1 None", + "bbox": [ + 93, + 281, + 142, + 295 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "VPEG_O", + "text_level": 1, + "bbox": [ + 91, + 327, + 166, + 342 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Title: SAFMNv3: Simple Feature Modulation Network for Real-Time Image Super-Resolution", + "bbox": [ + 89, + 351, + 482, + 382 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 383, + 161, + 396 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Long Sun1 (cs.longsun@njust.edu.cn),", + "bbox": [ + 91, + 397, + 348, + 412 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Lianhong Song1,", + "bbox": [ + 91, + 412, + 202, + 426 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Jinshan Pan1,", + "bbox": [ + 91, + 428, + 183, + 441 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Jiangxin Dong1,", + "bbox": [ + 91, + 443, + 202, + 458 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Jinhui Tang", + "bbox": [ + 91, + 458, + 176, + 472 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 474, + 174, + 487 + ], + "page_idx": 40 + }, + { + "type": 
"text", + "text": "1 Nanjing University of Science and Technology", + "bbox": [ + 93, + 487, + 411, + 503 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "mmSR", + "text_level": 1, + "bbox": [ + 91, + 534, + 148, + 547 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Title: Efficient Feature Aggregation Network for Image Super-Resolution", + "bbox": [ + 89, + 558, + 482, + 588 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 589, + 161, + 602 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Jiyu $\\mathsf{W u}^1$ (jiyu_wu@163.com),", + "bbox": [ + 91, + 604, + 297, + 619 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Jiancheng Huang $^{1}$ (jc.huang@siat.ac.cn),", + "bbox": [ + 91, + 619, + 364, + 633 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Yifan Liu1,", + "bbox": [ + 91, + 633, + 173, + 647 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Yi Huang $^{1}$ ,", + "bbox": [ + 91, + 648, + 173, + 662 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Shifeng Chen 1,", + "bbox": [ + 91, + 662, + 197, + 679 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 680, + 174, + 694 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "1 Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences", + "bbox": [ + 91, + 694, + 482, + 724 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "ChanSR", + "text_level": 1, + "bbox": [ + 91, + 755, + 161, + 768 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Title: EECNet: Edge Enhanced Convolutional Network for Efficient Super-Resolution", + "bbox": [ + 89, + 779, + 482, + 810 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 811, + 161, + 823 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Rui Chen1 (chenr269@163.com),", + "bbox": [ + 91, + 824, + 315, + 840 + ], + "page_idx": 
40 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 840, + 174, + 854 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "1 Shenzhen International Graduate School, Tsinghua University, China", + "bbox": [ + 91, + 854, + 482, + 885 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Pixel Alchemists", + "text_level": 1, + "bbox": [ + 514, + 90, + 647, + 104 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Title: RCUNet", + "bbox": [ + 514, + 112, + 622, + 126 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 128, + 584, + 141 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Yi Feng $^{1}$ (fenyi_work@163.com),", + "bbox": [ + 514, + 143, + 740, + 157 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Mingxi $\\mathrm{Li}^1$", + "bbox": [ + 514, + 157, + 594, + 172 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Cailu Wan1,", + "bbox": [ + 514, + 172, + 596, + 186 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Xiangji $\\mathbf{W}\\mathbf{u}^{1}$", + "bbox": [ + 514, + 188, + 604, + 203 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 219, + 596, + 232 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "$^{1}$ Independent researcher", + "bbox": [ + 514, + 233, + 681, + 247 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "LZ", + "text_level": 1, + "bbox": [ + 514, + 272, + 542, + 286 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Title: Tensor decompose efficient super-resolution network", + "bbox": [ + 513, + 294, + 903, + 309 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 310, + 584, + 323 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Zibin Liu1 (1451971605@qq.com),", + "bbox": [ + 514, + 324, + 751, + 339 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Jinyang Zhong $^{2}$ 
(1439764064@qq.com),", + "bbox": [ + 514, + 340, + 785, + 354 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 369, + 596, + 383 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "$^{1}$ Southwest Jiaotong University", + "bbox": [ + 514, + 383, + 728, + 398 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Sichuan University", + "bbox": [ + 514, + 400, + 653, + 414 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Z6", + "text_level": 1, + "bbox": [ + 514, + 439, + 540, + 453 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Title: GLoReNet: Global and Local feature Refinement Network for Efficient Super-Resolution", + "bbox": [ + 513, + 460, + 903, + 491 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 492, + 584, + 503 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Kihwan Yoon $^{1}$ (rlghksdbs@gmail.com),", + "bbox": [ + 514, + 506, + 782, + 521 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Ganzorig Gankhuyag1,", + "bbox": [ + 514, + 522, + 669, + 537 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 537, + 596, + 551 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "$^{1}$ Korea Electronics Technology Institute (KETI)", + "bbox": [ + 514, + 551, + 836, + 566 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "TACO_SR", + "text_level": 1, + "bbox": [ + 514, + 590, + 596, + 604 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Title: TenInOneSR", + "bbox": [ + 514, + 613, + 647, + 626 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 628, + 584, + 641 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Shengyun Zhong $^{1}$ (shengyunzhong2002@gmail.com),", + "bbox": [ + 514, + 643, + 874, + 657 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Mingyang $\\mathbf{W u}^{2}$ 
(mingyang@tamu.edu),", + "bbox": [ + 514, + 657, + 777, + 672 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Renjie $\mathrm{Li}^2$ (renjie@tamu.edu),", + "bbox": [ + 514, + 672, + 717, + 686 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Yushen Zuo $^{3}$ (zuoyushen12@gmail.com),", + "bbox": [ + 514, + 688, + 792, + 703 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Zhengzhong $\mathrm{Tu}^2$ (tzz@tamu.edu),", + "bbox": [ + 514, + 704, + 741, + 718 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 719, + 596, + 732 + ], + "page_idx": 40 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 Northeastern University, USA", + "$^{2}$ Texas A&M University, USA", + "3 The Hong Kong Polytechnic University, Hong Kong" + ], + "bbox": [ + 514, + 733, + 870, + 779 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "AIOT.AI", + "text_level": 1, + "bbox": [ + 514, + 801, + 589, + 816 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Title: Efficient channel attention super-resolution network acting on space", + "bbox": [ + 513, + 824, + 903, + 854 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 856, + 584, + 868 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Zongang Gao $^{1}$ (gaozongang@qq.com),", + "bbox": [ + 514, + 869, + 785, + 885 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Guannan Chen1,", + "bbox": [ + 514, + 886, + 625, + 898 + ], + "page_idx": 40 + }, + { + "type": "page_number", + "text": "41", + "bbox": [ + 488, + 924, + 506, + 936 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Yuan Tian1,", + "bbox": [ + 91, + 90, + 174, + 104 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Wenhui Chen", + "bbox": [ + 91, + 106, + 189, + 119 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 122, + 174, + 135 + ], + 
"page_idx": 41 + }, + { + "type": "text", + "text": "$^{1}$ BOE, AIOT CTO, Beijing, China", + "bbox": [ + 91, + 135, + 325, + 151 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "JNU620", + "text_level": 1, + "bbox": [ + 91, + 176, + 158, + 191 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Title: Reparameterized Residual Local Feature Network for Efficient Image Super-Resolution", + "bbox": [ + 91, + 198, + 482, + 228 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 229, + 161, + 243 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Weijun Yuan $^{1}$ (yweijun@stu2022.jnu.edu.cn),", + "bbox": [ + 91, + 244, + 395, + 258 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Zhan Li1,", + "bbox": [ + 91, + 260, + 156, + 273 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Yihang Chen1,", + "bbox": [ + 91, + 273, + 191, + 289 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Yifan Deng1,", + "bbox": [ + 91, + 290, + 183, + 305 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Ruting Deng1,", + "bbox": [ + 91, + 305, + 189, + 320 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 321, + 174, + 334 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "$^{1}$ Jinan University", + "bbox": [ + 91, + 334, + 212, + 349 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "LVGroup_HFUT", + "text_level": 1, + "bbox": [ + 91, + 375, + 223, + 391 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Title: Swift Parameter-free Attention Network for Efficient Image Super-Resolution", + "bbox": [ + 91, + 397, + 482, + 426 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 429, + 161, + 441 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Yilin Zhang $^{1}$ (eslzzyl@163.com),", + "bbox": [ + 91, + 443, + 316, + 458 + ], + "page_idx": 41 + }, + { + "type": 
"text", + "text": "Huan Zheng $^{2}$ , (huanzheng1998@gmail.com),", + "bbox": [ + 91, + 458, + 393, + 473 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Yanyan Wei1 (weiyy@hfut.edu.cn),", + "bbox": [ + 91, + 473, + 328, + 487 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Wenxuan Zhao $^{1}$ (nightvoyagerr@gmail.com),", + "bbox": [ + 91, + 488, + 395, + 503 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Suiyi Zhao $^{1}$ (meranderzhao@gmail.com),", + "bbox": [ + 91, + 503, + 370, + 518 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Fei Wang1 (jiafei127@gmail.com),", + "bbox": [ + 91, + 518, + 326, + 534 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Kun Li $^{1}$ (kunli.hfut@gmail.com),", + "bbox": [ + 91, + 534, + 316, + 549 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 550, + 174, + 563 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "1 Hefei University of Technology", + "bbox": [ + 91, + 563, + 313, + 579 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "2 University of Macau", + "bbox": [ + 91, + 579, + 241, + 594 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "YG", + "text_level": 1, + "bbox": [ + 91, + 619, + 122, + 633 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Title: Spatial-Gate Self-Distillation Network for Efficient Image Super-Resolution", + "bbox": [ + 91, + 641, + 482, + 671 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 672, + 161, + 685 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Yinggan Tang $^{1}$ (ygtang@ysu.edu.cn),", + "bbox": [ + 91, + 686, + 346, + 702 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Mengjie Su 2,", + "bbox": [ + 91, + 702, + 186, + 717 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 718, + 174, + 732 + ], + "page_idx": 41 + }, + { + "type": 
"text", + "text": "$^{1}$ School of Electrical Engineering, Yanshan University", + "bbox": [ + 91, + 732, + 455, + 747 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "MegastudyEdu_Vision.AI", + "text_level": 1, + "bbox": [ + 91, + 772, + 292, + 787 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Title: Multi-scale Aggregation Attention Network for Efficient Image Super-resolution", + "bbox": [ + 91, + 794, + 482, + 825 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 825, + 161, + 838 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Jae-hyeon Lee $^{1}$ (dlwogus147@gmail.com),", + "bbox": [ + 91, + 839, + 383, + 854 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Dong-Hyeop Son1,", + "bbox": [ + 91, + 854, + 223, + 869 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Ui-Jin Choi1,", + "bbox": [ + 91, + 869, + 187, + 883 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 885, + 174, + 900 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "$^{1}$ MegastudyEdu Vision AI", + "bbox": [ + 514, + 90, + 696, + 106 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "MILA", + "text_level": 1, + "bbox": [ + 514, + 131, + 566, + 145 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Title: Multi-Level Variance Feature Modulation Network for Lightweight Image Super-Resolution", + "bbox": [ + 513, + 152, + 903, + 184 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 185, + 584, + 196 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Tiancheng Shao1 (shaotiancheng666@outlook.com),", + "bbox": [ + 514, + 198, + 864, + 214 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Yuqing Zhang2", + "bbox": [ + 514, + 214, + 620, + 229 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Mengcheng $\\mathrm{Ma}^3$", + "bbox": [ + 514, + 229, + 632, + 244 + 
], + "page_idx": 41 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 244, + 596, + 258 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "1 Anhui University of Technology", + "bbox": [ + 514, + 258, + 741, + 275 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "AiMF_SR", + "text_level": 1, + "bbox": [ + 514, + 299, + 593, + 314 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Title: Mixture of Efficient Attention for Efficient Image Super-Resolution", + "bbox": [ + 513, + 321, + 903, + 352 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 353, + 584, + 364 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Donggeun $\\mathrm{Ko}^1$ (sean.ko@aimfuture.ai),", + "bbox": [ + 514, + 367, + 777, + 383 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Youngsang Kwak1,", + "bbox": [ + 514, + 383, + 643, + 397 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Jiun Lee1,", + "bbox": [ + 514, + 398, + 584, + 411 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Jaehwa Kwak1,", + "bbox": [ + 514, + 412, + 620, + 426 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 428, + 596, + 443 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "1 AiM Future Inc.", + "bbox": [ + 514, + 443, + 635, + 455 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "BVIVSR", + "text_level": 1, + "bbox": [ + 514, + 483, + 586, + 497 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Title: NTIRE 2025 Efficient SR Challenge Factsheet", + "bbox": [ + 513, + 505, + 867, + 521 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 521, + 584, + 534 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Yuxuan Jiang $^{1}$ (yuxuan.jiang@bristol.ac.uk),", + "bbox": [ + 514, + 535, + 812, + 551 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Qiang 
Zhu $^{2,1}$ (zhuqiang@std.uestc.edu.cn),", + "bbox": [ + 514, + 551, + 803, + 566 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Siyue Teng1 (siyue.teng@bristol.ac.uk),", + "bbox": [ + 514, + 566, + 779, + 580 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Fan Zhang1, (fan.zhang@bristol.ac.uk),", + "bbox": [ + 514, + 580, + 777, + 595 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Shuyuan Zhu2, (eezsy@uestc.edu.cn),", + "bbox": [ + 514, + 595, + 767, + 611 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Bing Zeng $^{2}$ , (eezeng@uestc.edu.cn),", + "bbox": [ + 514, + 611, + 759, + 626 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "David Bull $^{1}$ (dave.bull@bristol.ac.uk),", + "bbox": [ + 514, + 626, + 769, + 641 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 642, + 596, + 656 + ], + "page_idx": 41 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 University of Bristol", + "$^{2}$ University of Electronic Science and Technology of China" + ], + "bbox": [ + 514, + 657, + 903, + 686 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "CUIT_HTT", + "text_level": 1, + "bbox": [ + 514, + 710, + 607, + 726 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Title: Frequency-Segmented Attention Network for Lightweight Image Super", + "bbox": [ + 513, + 734, + 903, + 763 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 765, + 584, + 777 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Jing Hu1 (jing_hu@163.com),", + "bbox": [ + 514, + 777, + 714, + 795 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Hui Deng1,", + "bbox": [ + 514, + 795, + 591, + 809 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Xuan Zhang $^{1}$ ,", + "bbox": [ + 514, + 810, + 609, + 824 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Lin Zhu", + "bbox": [ + 514, 
+ 825, + 576, + 838 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Qinrui Fan", + "bbox": [ + 514, + 839, + 594, + 854 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 856, + 596, + 869 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "1 Chengdu University of Information Technology", + "bbox": [ + 514, + 869, + 841, + 885 + ], + "page_idx": 41 + }, + { + "type": "page_number", + "text": "42", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "GXZY.AI", + "text_level": 1, + "bbox": [ + 91, + 90, + 174, + 104 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Title: Parameter Free Vision Mamba For Lightweight Image Super-Resolution", + "bbox": [ + 89, + 112, + 482, + 143 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 143, + 163, + 156 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Weijian Deng $^{1}$ (348957269@qq.com),", + "bbox": [ + 91, + 157, + 349, + 172 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Junnan $\\mathbf{W u}^{1}$ (838050895@qq.com),", + "bbox": [ + 91, + 172, + 333, + 188 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Wenqin Deng $^{2}$ (1601524278@qq.com),", + "bbox": [ + 91, + 188, + 356, + 203 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Yuquan Liu $^{1}$ (653060432@qq.com),", + "bbox": [ + 91, + 203, + 334, + 218 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Zhaohong $\\mathrm{Xu}^{1}$ (719357155@qq.com),", + "bbox": [ + 91, + 218, + 349, + 233 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 233, + 174, + 247 + ], + "page_idx": 42 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 Guangxi China Tobacco Industry Corporation Limited, China", + "2 Guangxi University, China" + ], + "bbox": [ + 91, + 247, + 482, + 294 + ], + "page_idx": 42 + }, + 
{ + "type": "text", + "text": "IPCV", + "text_level": 1, + "bbox": [ + 91, + 316, + 140, + 332 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Title: Efficient HiTSR", + "bbox": [ + 91, + 339, + 246, + 354 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 354, + 163, + 368 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Jameer Babu Pinjari $^{1}$ (jameer.jb@gmail.com),", + "bbox": [ + 91, + 369, + 401, + 385 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Kuldeep Purohit $^{1}$ , (kuldeeppurohit3@gmail.com)", + "bbox": [ + 91, + 385, + 423, + 400 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 400, + 174, + 415 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "$^{1}$ Independent researcher", + "bbox": [ + 91, + 415, + 259, + 430 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "X-L", + "text_level": 1, + "bbox": [ + 91, + 454, + 127, + 469 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Title: Partial Permuted Self-Attention for Lightweight Super-Resolution", + "bbox": [ + 91, + 476, + 482, + 506 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 507, + 163, + 520 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Zeyu Xiao $^{1}$ (zeyuxiao1997@163.com),", + "bbox": [ + 91, + 522, + 352, + 537 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Zhuoyuan Li $^{2}$ (zhuoyuanli@mail.ustc.edu.cn)", + "bbox": [ + 91, + 537, + 395, + 551 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 551, + 174, + 566 + ], + "page_idx": 42 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ National University of Singapore", + "$^{2}$ University of Science and Technology of China" + ], + "bbox": [ + 91, + 566, + 416, + 597 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Quantum_Res", + 
"text_level": 1, + "bbox": [ + 91, + 622, + 204, + 638 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Title: Efficient Mamba-Based Image Super-Resolution via Knowledge Distillation", + "bbox": [ + 91, + 643, + 482, + 672 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 675, + 163, + 686 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Surya Vashist $^{1}$ (surya.vashisth@s.amity.edu),", + "bbox": [ + 91, + 688, + 400, + 704 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Akshay Dudhane $^{2}$ (akshay.dudhane@mbzuai.ac.ae),", + "bbox": [ + 91, + 704, + 437, + 718 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Praful Hambarde3 (praful@iitmandi.ac.in),", + "bbox": [ + 91, + 718, + 377, + 734 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Sachin Chaudhary $^{4}$ (sachin.chaudhary@ddn.upes.ac.in),", + "bbox": [ + 91, + 734, + 462, + 750 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Satya Naryan Tazi $^{5}$ (satya.tazi@ecajmer.ac.in),", + "bbox": [ + 91, + 750, + 403, + 763 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Prashant Patil $^{6}$ (pwpatil@iitg.ac.in),", + "bbox": [ + 91, + 763, + 333, + 779 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Santosh Kumar Vipparthi7 (skvipparthi@iitrpr.ac.in),", + "bbox": [ + 91, + 779, + 442, + 795 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Subrahmanyam Murala8 (muralas@tcd.ie),", + "bbox": [ + 91, + 795, + 377, + 810 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 810, + 174, + 824 + ], + "page_idx": 42 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 Amity University Punjab, India", + "$^{2}$ Mohamed Bin Zayed University of Artificial Intelligence, Abu Dhabi", + "3 Indian Institute of Technology Mandi, India", + "4 UPES Dehradun, India" + ], + "bbox": [ + 91, + 824, + 480, + 898 + ], + "page_idx": 42 
+ }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{5}$ Government Engineering College Ajmer, India", + "$^{6}$ Indian Institute of Technology Guwahati, India", + "$^{7}$ Indian Institute of Technology Ropar, India", + "$^{8}$ Trinity College Dublin, Ireland" + ], + "bbox": [ + 513, + 90, + 836, + 151 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "SylabSR", + "text_level": 1, + "bbox": [ + 513, + 176, + 584, + 191 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Title: AutoRegressive Residual Local Feature Network", + "bbox": [ + 511, + 198, + 883, + 213 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 214, + 584, + 227 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Wei-Chen Shen $^{1}$ (r11921a38@ntu.edu.tw),", + "bbox": [ + 514, + 228, + 800, + 243 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "I-Hsiang Chen $^{1,2}$ ,", + "bbox": [ + 514, + 243, + 640, + 258 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 258, + 596, + 273 + ], + "page_idx": 42 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ National Taiwan University", + "2 University of Washington" + ], + "bbox": [ + 514, + 273, + 709, + 304 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "NJUPCA", + "text_level": 1, + "bbox": [ + 513, + 329, + 589, + 344 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Title: Spatial-Frequency Fusion Model for Efficient Super-Resolution", + "bbox": [ + 511, + 351, + 906, + 381 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 382, + 584, + 396 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Yunzhe $\\mathbf{X}\\mathbf{u}^{1}$ (221900144@smail.nju.edu.cn),", + "bbox": [ + 514, + 396, + 812, + 412 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Chen Zhao1,", + "bbox": [ + 514, + 412, + 602, + 426 + ], + 
"page_idx": 42 + }, + { + "type": "text", + "text": "Zhizhou Chen1,", + "bbox": [ + 514, + 426, + 622, + 441 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 441, + 596, + 457 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "$^{1}$ Nanjing University", + "bbox": [ + 514, + 457, + 653, + 472 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "DepthIBN", + "text_level": 1, + "bbox": [ + 513, + 497, + 596, + 513 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Title: Involution and BSConv Multi-Depth Distillation Network for Lightweight Image Super-Resolution", + "bbox": [ + 511, + 518, + 905, + 550 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 550, + 584, + 563 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Akram Khatami-Rizi $^{1}$ (akramkhatami67@gmail.com), Ahmad Mahmoudi-Aznaveh $^{1}$ , (a.mahmoudi@sbu.ac.ir", + "bbox": [ + 514, + 564, + 880, + 594 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 595, + 596, + 609 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "1 Cyberspace Research Institute of Shahid Beheshti University of Iran", + "bbox": [ + 514, + 609, + 903, + 640 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Cidaut.AI", + "text_level": 1, + "bbox": [ + 514, + 665, + 596, + 679 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Title: Fused Edge Attention Network", + "bbox": [ + 513, + 686, + 767, + 702 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 703, + 584, + 715 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Alejandro Merino1 (alemer@cidaut.es),", + "bbox": [ + 514, + 717, + 777, + 732 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Bruno Longarela1 (brulon@cidaut.es),", + "bbox": [ + 514, + 732, + 769, + 747 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": 
"Javier Abad1 (javaba@cidaut.es),", + "bbox": [ + 514, + 747, + 736, + 762 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Marcos V. Conde $^{2}$ (marcos.conde@uni-wuerzburg.de),", + "bbox": [ + 514, + 762, + 877, + 777 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 514, + 777, + 596, + 792 + ], + "page_idx": 42 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 Cidaut AI, Spain", + "$^{2}$ University of Würzburg, Germany" + ], + "bbox": [ + 514, + 792, + 753, + 823 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "IVL", + "text_level": 1, + "bbox": [ + 514, + 847, + 550, + 862 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Title: PAEDN", + "bbox": [ + 513, + 869, + 617, + 883 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 885, + 584, + 898 + ], + "page_idx": 42 + }, + { + "type": "page_number", + "text": "43", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Simone Bianco $^{1}$ (simone.bianco@unimib.com),", + "bbox": [ + 89, + 90, + 410, + 104 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Luca Cogo1 (luca.cogo@unimib.com),", + "bbox": [ + 89, + 106, + 349, + 119 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Gianmarco Corti1 (g.corti1967@campus.unimib.com),", + "bbox": [ + 89, + 121, + 452, + 136 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 89, + 152, + 174, + 165 + ], + "page_idx": 43 + }, + { + "type": "ref_text", + "text": "$^{1}$ Department of Informatics Systems and Communication, University of Milano-Bicocca, Viale Sarca 336, Building U14, Milan, Italy", + "bbox": [ + 89, + 166, + 482, + 212 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 239, + 187, + 256 + ], + "page_idx": 43 + }, + { + "type": "list", + 
"sub_type": "ref_text", + "list_items": [ + "[1] Lusine Abrahamyan, Anh Minh Truong, Wilfried Philips, and Nikos Deligiannis. Gradient variance loss for structure-enhanced image super-resolution. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 3219-3223. IEEE, 2022. 3", + "[2] Eirikur Agustsson and Radu Timofte. Ntire 2017 challenge on single image super-resolution: Dataset and study. In 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1122-1131, 2017. 14", + "[3] Eirikur Agustsson and Radu Timofte. Ntire 2017 challenge on single image super-resolution: Dataset and study. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2017. 33", + "[4] Eirikur Agustsson and Radu Timofte. NTIRE 2017 challenge on single image super-resolution: Dataset and study. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 126-135, 2017. 2, 18, 19, 22, 23, 26, 33", + "[5] Akram Khatami-Rizi Ahmad Mahmoudi-Aznaveh. The role of involution in lightweight super resolution. 2024 13th Iranian/3rd International Machine Vision and Image Processing Conference (MVIP), 2024. 37", + "[6] Akram Khatami-Rizi Ahmad Mahmoudi-Aznaveh. Involution and bsconv multi-depth distillation network for lightweight image super-resolution. arXiv preprint arXiv:2503.14779, 2025. 37", + "[7] Sidra Aleem, Julia Dietlmeier, Eric Arazo, and Suzanne Little. Convlora and adabn based domain adaptation via self-training. In 2024 IEEE International Symposium on Biomedical Imaging (ISBI), pages 1-5. IEEE, 2024. 6, 7", + "[8] Jiezhang Cao, Qin Wang, Yongqin Xian, Yawei Li, Bingbing Ni, Zhiming Pi, Kai Zhang, Yulun Zhang, Radu Timofte, and Luc Van Gool. Ciaosr: Continuous implicit attention-in-attention network for arbitrary-scale image super-resolution. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1796–1807, 2023. 2", + "[9] Jierun Chen, Shiu-hong Kao, Hao He, Weipeng Zhuo, Song Wen, Chul-Ho Lee, and S-H Gary Chan. Run, don't walk: Chasing higher flops for faster neural networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2023. 33", + "[10] Liangyu Chen, Xiaojie Chu, Xiangyu Zhang, and Jian Sun. Simple baselines for image restoration, 2022. 38" + ], + "bbox": [ + 99, + 265, + 482, + 900 + ], + "page_idx": 43 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[11] Zheng Chen, Zongwei Wu, Eduard Zamfir, Kai Zhang, Yu-lun Zhang, Radu Timofte, Xiaokang Yang, Hongyuan Yu, Cheng Wan, Yuxin Hong, et al. Ntire 2024 challenge on image super-resolution (x4): Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6108-6132, 2024. 30", + "[12] Zheng Chen, Kai Liu, Jue Gong, Jingkai Wang, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on image super-resolution $(\\times 4)$ : Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[13] Zheng Chen, Jingkai Wang, Kai Liu, Jue Gong, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on real-world face restoration: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[14] Sung-Jin Cho, Seo-Won Ji, Jun-Pyo Hong, Seung-Won Jung, and Sung-Jea Ko. Rethinking coarse-to-fine approach in single image deblurring. In ICCV, 2021. 10, 17, 29", + "[15] Sung-Jin Cho, Seo-Won Ji, Jun-Pyo Hong, Seung-Won Jung, and Sung-Jea Ko. Rethinking coarse-to-fine approach in single image deblurring. In ICCV, pages 4641-4650, 2021. 18, 25, 26", + "[16] Marcos Conde, Radu Timofte, et al. NTIRE 2025 challenge on raw image restoration and super-resolution. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[17] Marcos Conde, Radu Timofte, et al. Raw image reconstruction from RGB on smartphones. NTIRE 2025 challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[18] Marcos V Conde, Zhijun Lei, Wen Li, Christos Bampis, Ioannis Katsavounidis, and Radu Timofte. Aim 2024 challenge on efficient video super-resolution for av1 compressed content. arXiv preprint arXiv:2409.17256, 2024. 30", + "[19] Weijian Deng, Hongjie Yuan, Lunhui Deng, and Zengtong Lu. Reparameterized residual feature network for lightweight image super-resolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1712-1721, 2023. 22", + "[20] Xiaohan Ding, Yuchen Guo, Guiguang Ding, and Jungong Han. Acnet: Strengthening the kernel skeletons for powerful cnn via asymmetric convolution blocks. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1911-1920, 2019. 3", + "[21] Xiaohan Ding, Xiangyu Zhang, Jungong Han, and Guiguang Ding. Diverse branch block: Building a convolution as an inception-like unit. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10886-10895, 2021. 3", + "[22] Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. Repvgg: Making veg-style convnets great again. In Proceedings of the IEEE/CVF" + ], + "bbox": [ + 522, + 92, + 906, + 900 + ], + "page_idx": 43 + }, + { + "type": "page_number", + "text": "44", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 43 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Conference on Computer Vision and Pattern Recognition, pages 13733-13742, 2021. 6", + "[23] Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. 
Repvgg: Making vgg-style convnets great again. In CVPR, 2021. 9, 17", + "[24] Jie Du, Kai Guan, Yanhong Zhou, Yuanman Li, and Tianfu Wang. Parameter-free similarity-aware attention module for medical image classification and segmentation. IEEE Transactions on Emerging Topics in Computational Intelligence, 2022. 6", + "[25] Zongcai Du, Ding Liu, Jie Liu, Jie Tang, Gangshan Wu, and Lean Fu. Fast and memory-efficient network towards efficient image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 853-862, 2022. 18", + "[26] Zongcai Du, Ding Liu, Jie Liu, Jie Tang, Gangshan Wu, and Lean Fu. Fast and memory-efficient network towards efficient image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 853-862, 2022. 36", + "[27] Stefan Elfwing, Eiji Uchibe, and Kenji Doya. Sigmoid-weighted linear units for neural network function approximation in reinforcement learning, 2017. 15, 17", + "[28] Egor Ershov, Sergey Korchagin, Alexei Khalin, Artyom Panshin, Arsenyi Terekhin, Ekaterina Zaychenkova, Georgiy Lobarev, Vsevolod Plokhotnyuk, Denis Abramov, Elisey Zhdanov, Sofia Dorogova, Yasin Mamedov, Nikola Banic, Georgii Perevozhikov, Radu Timofte, et al. NTIRE 2025 challenge on night photography rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[29] Yuqian Fu, Xingyu Qiu, Bin Ren Yanwei Fu, Radu Timofte, Nicu Sebe, Ming-Hsuan Yang, Luc Van Gool, et al. NTIRE 2025 challenge on cross-domain few-shot object detection: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[30] Albert Gu and Tri Dao. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752, 2023. 6", + "[31] Enxuan Gu, Hongwei Ge, and Yong Guo. 
Code: An explicit content decoupling framework for image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2920-2930, 2024. 14", + "[32] Hang Guo, Yong Guo, Yaohua Zha, Yulun Zhang, Wenbo Li, Tao Dai, Shu-Tao Xia, and Yawei Li. Mambairv2: Attentive state space restoration. arXiv preprint arXiv:2411.15269, 2024. 6, 30, 34, 35", + "[33] Hang Guo, Jinmin Li, Tao Dai, Zhihao Ouyang, Xudong Ren, and Shu-Tao Xia. Mambair: A simple baseline for image restoration with state-space model. In European Conference on Computer Vision, pages 222-241. Springer, 2024. 33", + "[34] Daniel Haase and Manuel Amthor. Rethinking depthwise separable convolutions: How intra-kernel correlations lead to improved mobilenets. In Proceedings of the IEEE/CVF" + ], + "bbox": [ + 99, + 92, + 482, + 900 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "conference on computer vision and pattern recognition, pages 14600-14609, 2020. 31, 37", + "[35] Kai Han, Yunhe Wang, Qi Tian, Jianyuan Guo, Chunjing Xu, and Chang Xu. Ghostnet: More features from cheap operations. In IEEE Conf. Comput. Vis. Pattern Recog., pages 1580-1589, 2020. 19", + "[36] Shuhao Han, Haotian Fan, Fangyuan Kong, Wenjie Liao, Chunle Guo, Chongyi Li, Radu Timofte, et al. NTIRE 2025 challenge on text to image generation model quality assessment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[37] Zibin He, Tao Dai, Jian Lu, Yong Jiang, and Shu-Tao Xia. Faked: Feature-affinity based knowledge distillation for efficient image super-resolution. In 2020 IEEE international conference on image processing (ICIP), pages 518-522. IEEE, 2020. 7", + "[38] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 
25", + "[39] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, et al. Searching for MobileNetV3. In Proceedings of the IEEE International Conference on Computer Vision, pages 1314-1324, 2019. 26", + "[40] Andrew G Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, and Hartwig Adam. Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861, 2017. 37", + "[41] Mu Hu, Junyi Feng, Jiashen Hua, Baisheng Lai, Jianqiang Huang, Xiaojin Gong, and Xian-Sheng Hua. Online convolutional re-parameterization. CoRR, abs/2204.00826, 2022. 19", + "[42] Zhewei Huang, Tianyuan Zhang, Wen Heng, Boxin Shi, and Shuchang Zhou. Real-time intermediate flow estimation for video frame interpolation. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 6, 9", + "[43] Zheng Hui, Xiumei Wang, and Xinbo Gao. Fast and accurate single image super-resolution via information distillation network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 723-731, 2018. 36", + "[44] Zheng Hui, Xinbo Gao, Yunchu Yang, and Xiumei Wang. Lightweight image super-resolution with information multi-distillation network. In Proceedings of the 27th acm international conference on multimedia, pages 2024-2032, 2019. 11", + "[45] Zheng Hui, Xinbo Gao, Yunchu Yang, and Xiumei Wang. Lightweight image super-resolution with information multi-distillation network. In Proceedings of the 27th acm international conference on multimedia, pages 2024-2032, 2019. 10, 36", + "[46] Pavel Izmailov, Dmitrii Podoprikhin, Timur Garipov, Dmitry Vetrov, and Andrew Gordon Wilson. Averaging weights leads to wider optima and better generalization. arXiv preprint arXiv:1803.05407, 2018. 
23" + ], + "bbox": [ + 522, + 93, + 903, + 900 + ], + "page_idx": 44 + }, + { + "type": "page_number", + "text": "45", + "bbox": [ + 488, + 924, + 506, + 935 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[47] Varun Jain, Zongwei Wu, Quan Zou, Louis Florentin, Henrik Turbell, Sandeep Siddhartha, Radu Timofte, et al. NTIRE 2025 challenge on video quality enhancement for video conferencing: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[48] Yuxuan Jiang, Chen Feng, Fan Zhang, and David Bull. Mtkd: Multi-teacher knowledge distillation for image super-resolution. In European Conference on Computer Vision, pages 364–382. Springer, 2024. 30, 31", + "[49] Yuxuan Jiang, Ho Man Kwan, Tianhao Peng, Ge Gao, Fan Zhang, Xiaqing Zhu, Joel Sole, and David Bull. HIIF: Hierarchical encoding based implicit image function for continuous super-resolution. arXiv preprint arXiv:2412.03748, 2024. 30", + "[50] Yuxuan Jiang, Jakub Nawala, Chen Feng, Fan Zhang, Xiaogjing Zhu, Joel Sole, and David Bull. Rtsr: A real-time super-resolution model for av1 compressed content. arXiv preprint arXiv:2411.13362, 2024. 30", + "[51] Yuxuan Jiang, Jakub Nawala, Fan Zhang, and David Bull. Compressing deep image super-resolution models. In 2024 Picture Coding Symposium (PCS), pages 1-5. IEEE, 2024. 14, 30", + "[52] Yuxuan Jiang, Chengxi Zeng, Siyue Teng, Fan Zhang, Xiaogjing Zhu, Joel Sole, and David Bull. C2D-ISR: Optimizing attention-based image super-resolution from continuous to discrete scales. arXiv preprint arXiv:2503.13740, 2025. 30, 31", + "[53] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 12", + "[54] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 8, 14, 18, 28, 30", + "[55] F. 
Kong, Mingxi Li, Songwei Liu, Ding Liu, Jingwen He, Yang Bai, Fangmin Chen, and Lean Fu. Residual local feature network for efficient super-resolution. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 765-775, 2022. 19, 22", + "[56] Fangyuan Kong, Mingxi Li, Songwei Liu, Ding Liu, Jingwen He, Yang Bai, Fangmin Chen, and Lean Fu. Residual local feature network for efficient super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 766-776, 2022. 18, 35", + "[57] Kin Wai Lau, Lai-Man Po, and Yasar Abbas Ur Rehman. Large separable kernel attention: Rethinking the large kernel attention design in cnn. Expert Systems with Applications, 236:121352, 2023. 28", + "[58] Sangmin Lee, Eunpil Park, Angel Canelo, Hyunhee Park, Youngjo Kim, Hyungju Chun, Xin Jin, Chongyi Li, Chun-Le Guo, Radu Timofte, et al. NTIRE 2025 challenge on efficient burst hdr and restoration: Datasets, methods, and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[59] Xiaoyan Lei, Wenlong Zhang, and Weifeng Cao. Dvmsr: Distillated vision mamba for efficient super-resolution. In" + ], + "bbox": [ + 99, + 92, + 482, + 900 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 6536-6546, 2024. 33", + "[60] Duo Li, Jie Hu, Changhu Wang, Xiangtai Li, Qi She, Lei Zhu, Tong Zhang, and Qifeng Chen. Involution: Inverting the inheritance of convolution for visual recognition. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021. 37", + "[61] Xin Li, Yeying Jin, Xin Jin, Zongwei Wu, Bingchen Li, Yufei Wang, Wenhan Yang, Yu Li, Zhibo Chen, Bihan Wen, Robby Tan, Radu Timofte, et al. 
NTIRE 2025 challenge on day and night raindrop removal for dual-focused images: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[62] Xin Li, Xijun Wang, Bingchen Li, Kun Yuan, Yizhen Shao, Suhang Yao, Ming Sun, Chao Zhou, Radu Timofte, and Zhibo Chen. NTIRE 2025 challenge on short-formUGC video quality assessment and enhancement: Kwaisr dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[63] Xin Li, Kun Yuan, Bingchen Li, Fengbin Guan, Yizhen Shao, Zihao Yu, Xijun Wang, Yiting Lu, Wei Luo, Suhang Yao, Ming Sun, Chao Zhou, Zhibo Chen, Radu Timofte, et al. NTIRE 2025 challenge on short-formUGC video quality assessment and enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[64] Yawei Li, Kai Zhang, Jingyun Liang, Jiezhang Cao, Ce Liu, Rui Gong, Yulun Zhang, Hao Tang, Yun Liu, Denis Demandolx, et al. Lsdir: A large scale dataset for image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2023. 2, 6, 10, 12, 14, 16, 17, 18, 19, 23, 24, 26, 28, 30, 33, 36", + "[65] Yawei Li, Yulun Zhang, Luc Van Gool, Radu Timofte, et al. NTIRE 2023 challenge on efficient super-resolution: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2023. 15, 16", + "[66] Zheyuan Li, Yingqi Liu, Xiangyu Chen, Haoming Cai, Jinjin Gu, Yu Qiao, and Chao Dong. Blueprint separable residual network for efficient image super-resolution. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 832-842, 2022. 13, 26", + "[67] Zheyuan Li, Yingqi Liu, Xiangyu Chen, Haoming Cai, Jinjin Gu, Yu Qiao, and Chao Dong. 
Blueprint separable residual network for efficient image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 833-843, 2022. 10, 36", + "[68] Jie Liang, Radu Timofte, Qiaosi Yi, Zhengqiang Zhang, Shuaizheng Liu, Lingchen Sun, Rongyuan Wu, Xindong Zhang, Hui Zeng, Lei Zhang, et al. NTIRE 2025 the 2nd restore any image model (RAIM) in the wild challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + ], + "bbox": [ + 524, + 92, + 903, + 900 + ], + "page_idx": 45 + }, + { + "type": "page_number", + "text": "46", + "bbox": [ + 488, + 925, + 508, + 936 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[69] Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Kyoung Mu Lee. Enhanced deep residual networks for single image super-resolution. In 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1132-1140, 2017. 14", + "[70] Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Young Mu Lee. Enhanced deep residual networks for single image super-resolution. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 1132-1140, 2017. 12, 17, 26, 28, 30", + "[71] Jie Liu, Jie Tang, and Gangshan Wu. Residual feature distillation network for lightweight image super-resolution. In Proceedings of the European Conference on Computer Vision Workshops, pages 41-55. Springer, 2020. 10, 32, 36", + "[72] Jie Liu, Jie Tang, and Gangshan Wu. Residual feature distillation network for lightweight image super-resolution. In Computer Vision-ECCV 2020 Workshops: Glasgow, UK, August 23-28, 2020, Proceedings, Part III 16, pages 41-55. Springer, 2020. 21", + "[73] Jie Liu, Wenjie Zhang, Yuting Tang, Jie Tang, and Gangshan Wu. Residual feature aggregation network for image super-resolution. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2359-2368, 2020. 11", + "[74] Xiaohong Liu, Xiongkuo Min, Qiang Hu, Xiaoyun Zhang, Jie Guo, et al. NTIRE 2025 XGC quality assessment challenge: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[75] Xiaoning Liu, Zongwei Wu, Florin-Alexandru Vasluianu, Hailong Yan, Bin Ren, Yulun Zhang, Shuhang Gu, Le Zhang, Ce Zhu, Radu Timofte, et al. NTIRE 2025 challenge on low light image enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[76] Zhuang Liu, Mingjie Sun, Tinghui Zhou, Gao Huang, and Trevor Darrell. Rethinking the value of network pruning. In ICLR, 2019. 2", + "[77] Zhaoyang Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Proceedings of the IEEE/cvf international conference on computer vision. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 12", + "[78] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. In *ICLR*, 2017, 17, 29", + "[79] Qi Ma, Yue Li, Bin Ren, Nicu Sebe, Ender Konukoglu, Theo Gevers, Luc Van Gool, and Danda Pani Paudel. Shapesplat: A large-scale dataset of gaussian splats and their self-supervised pretraining. In International Conference on 3D Vision 2025, 2024. 2", + "[80] Yanyu Mao, Nihao Zhang, Qian Wang, Bendu Bai, Wanying Bai, Haonan Fang, Peng Liu, Mingyue Li, and Shengbo Yan. Multi-level dispersion residual network for efficient image super-resolution. In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1660-1669, 2023. 
12", + "[81] Yanyu Mao, Nihao Zhang, Qian Wang, Bendu Bai, Wanying Bai, Haonan Fang, Peng Liu, Mingyue Li, and Shengbo" + ], + "bbox": [ + 99, + 90, + 485, + 901 + ], + "page_idx": 46 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yan. Multi-level dispersion residual network for efficient image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 1660-1669, 2023. 10, 11, 28", + "[82] Jakub Nawala, Yuxuan Jiang, Fan Zhang, Xiaqing Zhu, Joel Sole, and David Bull. Bvi-aom: A new training dataset for deep video compression optimization. In 2024 IEEE International Conference on Visual Communications and Image Processing (VCIP), pages 1-5. IEEE, 2024. 30", + "[83] Ying Nie, Kai Han, Zhenhua Liu, An Xiao, Yiping Deng, Chunjing Xu, and Yunhe Wang. Ghostsr: Learning ghost features for efficient image super-resolution. CoRR, abs/2101.08525, 2021. 19", + "[84] Seung Park, Yoon-Jae Yeo, and Yong-Goo Shin. Pconv: simple yet effective convolutional layer for generative adversarial network. Neural Computing and Applications, 34 (9):7113-7124, 2022. 37, 38", + "[85] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. arXiv preprint arXiv:1912.01703, 2019. 18", + "[86] Danfeng Qin, Chas Leichner, Manolis Delakis, Marco Fornoni, Shixin Luo, Fan Yang, Weijun Wang, Colby Banbury, Chengxi Ye, Berkin Akin, Vaibhav Aggarwal, Tenghui Zhu, Daniele Moro, and Andrew Howard. Mobilenetv4 - universal models for the mobile ecosystem, 2024. 37, 38", + "[87] Yajun Qiu, Qiang Zhu, Shuyuan Zhu, and Bing Zeng. Dual circle contrastive learning-based blind image superresolution. IEEE Transactions on Circuits and Systems for Video Technology, 34(3):1757-1771, 2023. 30", + "[88] Yunpeng Qu, Kun Yuan, Jinhua Hao, Kai Zhao, Qizhi Xie, Ming Sun, and Chao Zhou. Visual autoregressive modeling for image super-resolution. 
arXiv preprint arXiv:2501.18993, 2025. 35", + "[89] Bin Ren, Yahui Liu, Yue Song, Wei Bi, Rita Cucchiara, Nicu Sebe, and Wei Wang. Masked jigsaw puzzle: A versatile position embedding for vision transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20382-20391, 2023. 2", + "[90] Bin Ren, Yawei Li, Jingyun Liang, Rakesh Ranjan, Mengyuan Liu, Rita Cucchiara, Luc V Gool, Ming-Hsuan Yang, and Nicu Sebe. Sharing key semantics in transformer makes efficient image restoration. Advances in Neural Information Processing Systems, 37:7427-7463, 2024. 2", + "[91] Bin Ren, Yawei Li, Nancy Mehta, Radu Timofte, Hongyuan Yu, Cheng Wan, Yuxin Hong, Bingnan Han, Zhuoyuan Wu, Yajun Zou, et al. The ninth nitire 2024 efficient super-resolution challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6595-6631, 2024. 2, 3, 4, 6, 17, 21, 35, 38", + "[92] Bin Ren, Hang Guo, Lei Sun, Zongwei Wu, Radu Timofte, Yawei Li, et al. The tenth NTIRE 2025 efficient super-resolution challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + ], + "bbox": [ + 522, + 92, + 905, + 900 + ], + "page_idx": 46 + }, + { + "type": "page_number", + "text": "47", + "bbox": [ + 488, + 924, + 506, + 935 + ], + "page_idx": 46 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[93] Nickolay Safonov, Alexey Bryntsev, Andrey Moskalenko, Dmitry Kulikov, Dmitriy Vatolin, Radu Timofte, et al. NTIRE 2025 challenge on UGC video enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[94] Wenzhe Shi, Jose Caballero, Ferenc Huszár, Johannes Totz, Andrew P Aitken, Rob Bishop, Daniel Rueckert, and Zehan Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1874-1883, 2016. 25", + "[95] Long Sun, Jinshan Pan, and Jinhui Tang. Shufflemixer: An efficient convnet for image super-resolution. Advances in Neural Information Processing Systems, 35:17314-17326, 2022. 29", + "[96] Long Sun, Jiangxin Dong, Jinhui Tang, and Jinshan Pan. Spatially-adaptive feature modulation for efficient image super-resolution. In ICCV, 2023. 17", + "[97] Lei Sun, Andrea Alfarano, Peiqi Duan, Shaolin Su, Kaiwei Wang, Boxin Shi, Radu Timofte, Danda Pani Paudel, Luc Van Gool, et al. NTIRE 2025 challenge on event-based image deblurring: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[98] Lei Sun, Hang Guo, Bin Ren, Luc Van Gool, Radu Timofte, Yawei Li, et al. The tenth ntire 2025 image denoising challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[99] Yunlong Tang, Junjia Guo, Pinxin Liu, Zhiyuan Wang, Hang Hua, Jia-Xing Zhong, Yunzhong Xiao, Chao Huang, Luchuan Song, Susan Liang, Yizhi Song, Liu He, Jing Bi, Mingqian Feng, Xinyang Li, Zeliang Zhang, and Chen-liang Xu. Generative ai for cel-animation: A survey. arXiv preprint arXiv:2501.06250, 2025. 14", + "[100] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In CVPR Workshops, 2017. 10, 17", + "[101] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 114-125, 2017. 23, 33", + "[102] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. 
In CVPR workshops, pages 114-125, 2017. 12, 30", + "[103] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, Lei Zhang, et al. NTIRE 2017 challenge on single image super-resolution: Methods and results. In CVPR Workshops, 2017. 17, 28", + "[104] Radu Timofte, Eirikur Agustsson, Shuhang Gu, J Wu, A Ignatov, and L Van Gool. Div2k dataset: Diverse 2k resolution high quality images as used for the challenges@ ntire (cvpr 2017 and cvpr 2018) and@ pirm (eccv 2018), 2018. 24, 36" + ], + "bbox": [ + 93, + 92, + 480, + 898 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[105] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Caitian Chen, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 image shadow removal challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[106] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 ambient lighting normalization challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[107] Pavan Kumar Anasosalu Vasu, James Gabriel, Jeff Zhu, Oncel Tuzel, and Anurag Ranjan. An improved one millisecond mobile backbone. arXiv preprint arXiv:2206.04040, 2022. 9", + "[108] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient superresolution. arXiv preprint arXiv:2311.12770, 2023. 34, 35, 38", + "[109] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Ya-jun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient super-resolution. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 6246-6256, 2024. 12, 13", + "[110] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. 
Swift parameter-free attention network for efficient superresolution. In IEEE Conf. Comput. Vis. Pattern Recog. Worksh., 2024. NTIRE 2024 ESR Challenge. 21", + "[111] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6246-6256, 2024. 9, 20", + "[112] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2024. 7, 8, 14, 20, 21, 23, 24, 26, 33, 36", + "[113] Hang Wang, Xuanhong Chen, Bingbing Ni, Yutian Liu, and Jinfan Liu. Omni aggregation networks for lightweight image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22378-22387, 2023. 17", + "[114] Hongyuan Wang, Ziyan Wei, Qingting Tang, Shuli Cheng, Liejun Wang, and Yongming Li. Attention guidance distillation network for efficient image super-resolution. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 6287-6296, 2024. 12, 13, 28", + "[115] Xintao Wang, Liangbin Xie, Ke Yu, Kelvin C.K. Chan, Chen Change Loy, and Chao Dong. BasicSR: Open source image and video restoration toolbox. https://github.com/XPixelGroup/BasicSR, 2022.29" + ], + "bbox": [ + 516, + 92, + 906, + 900 + ], + "page_idx": 47 + }, + { + "type": "page_number", + "text": "48", + "bbox": [ + 488, + 924, + 506, + 935 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[116] Yan Wang. Edge-enhanced feature distillation network for efficient super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 777-785, 2022. 
2, 3, 4, 18, 38", + "[117] Yan Wang. Edge-enhanced feature distillation network for efficient super-resolution, 2022. 37", + "[118] Yucong Wang and Minjie Cai. A single residual network with eta modules and distillation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1970-1980, 2023. 18", + "[119] Yan Wang, Yusen Li, Gang Wang, and Xiaoguang Liu. Multi-scale attention network for single image superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2024. 28", + "[120] Yan Wang, Yusen Li, Gang Wang, and Xiaoguang Liu. Pla-nusr: Chasing faster convnet for efficient super-resolution. arXiv preprint arXiv:2409.13435, 2024. 26", + "[121] Yingqian Wang, Zhengyu Liang, Fengyuan Zhang, Lvli Tian, Longguang Wang, Juncheng Li, Jungang Yang, Radu Timofte, Yulan Guo, et al. NTIRE 2025 challenge on light field image super-resolution: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[122] Gang Wu, Junjun Jiang, Junpeng Jiang, and Xianming Liu. Transforming image super-resolution: A convformer-based efficient approach. IEEE Transactions on Image Processing, 2024. 27, 28", + "[123] Chengxing Xie, Xiaoming Zhang, Linze Li, Yuqian Fu, Biao Gong, Tianrui Li, and Kai Zhang. Mat: Multi-range attention transformer for efficient image super-resolution. IEEE Transactions on Circuits and Systems for Video Technology, 2025. 2", + "[124] Xingyu Xie, Pan Zhou, Huan Li, Zhouchen Lin, and Shuicheng Yan. Adan: Adaptive nesterov momentum algorithm for faster optimizing deep models. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 26", + "[125] Kangning Yang, Jie Cai, Ling Ouyang, Florin-Alexandru Vasluianu, Radu Timofte, Jiaming Ding, Huiming Sun, Lan Fu, Jinlong Li, Chiu Man Ho, Zibo Meng, et al. 
NTIRE 2025 challenge on single image reflection removal in the wild: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[126] Lingxiao Yang, Ru-Yuan Zhang, Lida Li, and Xiaohua Xie. Simam: A simple, parameter-free attention module for convolutional neural networks. In International conference on machine learning, pages 11863-11874. PMLR, 2021. 6", + "[127] Kihwan Yoon, Ganzorig Gankhuyag, Jinman Park, Haengseon Son, and Kyoungwon Min. Casr: Efficient cascade network structure with channel aligned method for 4k real-time single image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7911-7920, 2024. 21", + "[128] Lei Yu, Xinpeng Li, Youwei Li, Ting Jiang, Qi Wu, Haoqiang Fan, and Shuaicheng Liu. Dipnet: Efficiency distillation and iterative pruning for image super-resolution. In" + ], + "bbox": [ + 89, + 92, + 485, + 900 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1692-1701, 2023. 15, 16", + "[129] Xiyu Yu, Tongliang Liu, Xinchao Wang, and Dacheng Tao. On compressing deep models by low rank and sparse decomposition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7370-7379, 2017. 2", + "[130] Pierluigi Zama Ramirez, Fabio Tosi, Luigi Di Stefano, Radu Timofte, Alex Costanzino, Matteo Poggi, Samuele Salti, Stefano Mattoccia, et al. NTIRE 2025 challenge on hr depth from images of specular and transparent surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[131] Eduard Zamfir, Zongwei Wu, Nancy Mehta, Yulun Zhang, and Radu Timofte. See more details: Efficient image superresolution by experts mining. In *Forty-first International Conference on Machine Learning*, 2024. 
29", + "[132] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, and Ming-Hsuan Yang. Restormer: Efficient transformer for high-resolution image restoration. In CVPR, 2022. 10, 28", + "[133] Dafeng Zhang, Feiyu Huang, Shizhuo Liu, Xiaobing Wang, and Zhezhu Jin. Swinfir: Revisiting the swinir with fast fourier convolution and improved training for image super-resolution, 2022. 14", + "[134] Xiang Zhang. Hit-sr: Hierarchical transformer for efficient image super-resolution. https://github.com/XiangZ-0/HiT-SR, 2024. GitHub repository. 33", + "[135] Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, and Jian Sun. Shufflenet: An extremely efficient convolutional neural network for mobile devices. Proceedings of the IEEE conference on computer vision and pattern recognition, 2018. 37", + "[136] Xindong Zhang, Hui Zeng, and Lei Zhang. Edge-oriented convolution block for real-time super resolution on mobile devices. In MM '21: ACM Multimedia Conference, Virtual Event, China, October 20 - 24, 2021, pages 4034-4043. ACM, 2021. 19", + "[137] Xindong Zhang, Huiyu Zeng, and Lei Zhang. Edge-oriented convolution block for real-time super resolution on mobile devices. Proceedings of the 29th ACM International Conference on Multimedia, 2021. 19", + "[138] Xindong Zhang, Hui Zeng, and Lei Zhang. Edge-oriented convolution block for real-time super resolution on mobile devices. In Proceedings of the 29th ACM International Conference on Multimedia, pages 4034-4043, 2021. 3, 21", + "[139] Xiang Zhang, Yulun Zhang, and Fisher Yu. Hit-sr: Hierarchical transformer for efficient image super-resolution. In European Conference on Computer Vision, pages 483-500. Springer, 2024. 30", + "[140] Xiang Zhang, Yulun Zhang, and Fisher Yu. Hit-sr: Hierarchical transformer for efficient image super-resolution. arXiv preprint, arXiv:2407.05878, 2024. 33", + "[141] Yulun Zhang, Kai Zhang, Zheng Chen, Yawei Li, Radu Timofte, et al. 
NTIRE 2023 challenge on image superresolution (x4): Methods and results. In Proceedings of" + ], + "bbox": [ + 514, + 92, + 906, + 900 + ], + "page_idx": 48 + }, + { + "type": "page_number", + "text": "49", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2023. 30", + "[142] Hengyuan Zhao, Xiangtao Kong, Jingwen He, Yu Qiao, and Chao Dong. Efficient image super-resolution using pixel attention. In European Conference on Computer Vision, pages 56-72. Springer, 2020. 26", + "[143] Mengyi Zhao, Mengyuan Liu, Bin Ren, Shuling Dai, and Nicu Sebe. Denoising diffusion probabilistic models for action-conditioned 3d motion generation. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 4225-4229. IEEE, 2024. 2", + "[144] Mingjun Zheng, Long Sun, Jiangxin Dong, and Jinshan Pan. Smfanet: A lightweight self-modulation feature aggregation network for efficient image super-resolution. In ECCV, 2024. 10, 17, 28", + "[145] Mingjun Zheng, Long Sun, Jiangxin Dong, and Jinshan Pan. Smfanet: A lightweight self-modulation feature aggregation network for efficient image super-resolution. In European Conference on Computer Vision, pages 359-375. Springer, 2024. 29", + "[146] Xu Zheng, Yunhao Luo, Pengyuan Zhou, and Lin Wang. Distilling efficient vision transformers from cnns for semantic segmentation. Pattern Recognition, 158:111029, 2025. 2", + "[147] Yupeng Zhou, Zhen Li, Chun-Le Guo, Song Bai, Ming-Ming Cheng, and Qibin Hou. Srformer: Permuted self-attention for single image super-resolution. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12780–12791, 2023. 30, 33, 34", + "[148] Lianghui Zhu, Bencheng Liao, Qian Zhang, Xinlong Wang, Wenyu Liu, and Xinggang Wang. 
Vision mamba: Efficient visual representation learning with bidirectional state space model. In *Forty-first International Conference on Machine Learning*, 2024. 2", + "[149] Qiang Zhu, Pengfei Li, and Qianhui Li. Attention retractable frequency fusion transformer for image super resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1756-1763, 2023. 30" + ], + "bbox": [ + 91, + 92, + 482, + 654 + ], + "page_idx": 49 + }, + { + "type": "page_number", + "text": "50", + "bbox": [ + 488, + 924, + 508, + 936 + ], + "page_idx": 49 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10686/02e14e26-d981-43b7-bd68-0bb6d5c44d72_model.json b/data/2025/2504_10xxx/2504.10686/02e14e26-d981-43b7-bd68-0bb6d5c44d72_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a6cecedd62f01102e46d4f49dd10d8414473e97c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/02e14e26-d981-43b7-bd68-0bb6d5c44d72_model.json @@ -0,0 +1,12609 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.263, + 0.061, + 0.705 + ], + "angle": 270, + "content": "arXiv:2504.10686v1 [cs.CV] 14 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.152, + 0.131, + 0.844, + 0.152 + ], + "angle": 0, + "content": "The Tenth NTIRE 2025 Efficient Super-Resolution Challenge Report" + }, + { + "type": "table", + "bbox": [ + 0.104, + 0.18, + 0.892, + 0.658 + ], + "angle": 0, + "content": "
Bin Ren*Hang Guo*Lei Sun*Zongwei Wu*Radu Timofte*Yawei Li*
Yao ZhangXinning ChaiZhengxue ChengYingsheng QinYucai Yang
Li SongHongyuan YuPufan XuCheng WanZhijuan HuangPeng Guo
Shuyuan CuiChenjun LiXuehai HuPan PanXin ZhangHeng Zhang
Qing LuoLinyan JiangHaibo LeiQifang GaoYaqing LiWeihua Luo
Tsing LiQing WangYi LiuYang WangHongyu AnLiou Zhang
Shijie ZhaoLianhong SongLong SunJinshan PanJiangxin DongJinhui Tang
Jing WeiMengyang WangRuilong GuoQian WangQingliang Liu
Yang ChengDavinciEnxuan GuPinxin LiuYongsheng YuHang Hua
Yunlong TangShihao WangYukun YangZhiyu ZhangYukun YangJiyu Wu
Jiancheng HuangYifan LiuYi HuangShifeng ChenRui ChenYi Feng
Mingxi LiCailu WanXiangji WuZibin LiuJinyang ZhongKihwan Yoon
Ganzorig GankhuyagShengyun ZhongMingyang WuRenjie LiYushen Zuo
Zhengzhong TuZongang GaoGuannan ChenYuan TianWenhui Chen
Weijun YuanZhan LiYihang ChenYifan DengRuting DengYilin Zhang
Huan ZhengYanyan WeiWenxuan ZhaoSuiyi ZhaoFei WangKun Li
Yinggan TangMengjie SuJae-hyeon LeeDong-Hyeop SonUi-Jin Choi
Tiancheng ShaoYuqing ZhangMengcheng MaDonggeun KoYoungsang Kwak
Jiun LeeJaehwa KwakYuxuan JiangQiang ZhuSiyue TengFan Zhang
Shuyuan ZhuBing ZengDavid BullJing HuHui DengXuan Zhang
Lin ZhuQinrui FanWeijian DengJunnan WuWenqin DengYuquan Liu
Zhaohong XuJameer Babu PinjariKuldeep PurohitZeyu XiaoZhuoyuan Li
Surya VashisthAkshay DudhanePraful HambardeSachin Chaudhary
Satya Naryan TaziPrashant PatilSantosh Kumar VipparthiSubrahmanyam Murala
Wei-Chen ShenI-Hsiang ChenYunzhe XuChen ZhaoZhizhou Chen
Akram Khatami-RiziAhmad Mahmoudi-AznavehAlejandro MerinoBruno Longarela
Javier AbadMarcos V. CondeSimone BiancoLuca CogoGianmarco Corti
" + }, + { + "type": "title", + "bbox": [ + 0.249, + 0.692, + 0.326, + 0.707 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.724, + 0.482, + 0.755 + ], + "angle": 0, + "content": "This paper presents a comprehensive review of the NTIRE 2025 Challenge on Single-Image Efficient Super-Resolution" + }, + { + "type": "page_footnote", + "bbox": [ + 0.09, + 0.768, + 0.483, + 0.865 + ], + "angle": 0, + "content": "* B. Ren (bin.ren@unitn.it, University of Pisa & University of Trento, Italy), H. Guo (cshguo@gmail.com, Tsinghua University), L. Sun (lei.sun@insait.ai,INSAIT, Sofia University\"St. Kliment Ohridski\"), Z. Wu (zongwei.wu@uni-wuerzburg.de, University of Würzburg, Germany), R. Timofte (Radu.Timofte@uni-wuerzburg.de, University of Würzburg, Germany), and Y. Li (yawei.li@vision.ee.ethz.ch, ETH Zürich, Switzerland) were the challenge organizers, while the other authors participated in the challenge." + }, + { + "type": "page_footnote", + "bbox": [ + 0.093, + 0.866, + 0.389, + 0.876 + ], + "angle": 0, + "content": "Appendix A contains the authors' teams and affiliations." + }, + { + "type": "page_footnote", + "bbox": [ + 0.093, + 0.877, + 0.458, + 0.888 + ], + "angle": 0, + "content": "NTIRE 2025 webpage: https://cvslai.net/ntire/2025/." + }, + { + "type": "page_footnote", + "bbox": [ + 0.093, + 0.889, + 0.478, + 0.9 + ], + "angle": 0, + "content": "Code: https://github.com/Amazingren/NTIRE2025_ESR/." + }, + { + "type": "list", + "bbox": [ + 0.09, + 0.768, + 0.483, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.693, + 0.908, + 0.86 + ], + "angle": 0, + "content": "(ESR). The challenge aimed to advance the development of deep models that optimize key computational metrics, i.e., runtime, parameters, and FLOPs, while achieving a PSNR of at least 26.90 dB on the DIV2K_LSDIR_valid dataset and 26.99 dB on the DIV2K_LSDIR_test dataset. 
A robust participation saw 244 registered entrants, with 43 teams submitting valid entries. This report meticulously analyzes these methods and results, emphasizing groundbreaking advancements in state-of-the-art single-image ESR techniques. The analysis highlights innovative approaches and establishes benchmarks for future research in the field." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.926, + 0.503, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.09, + 0.223, + 0.106 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.115, + 0.482, + 0.297 + ], + "angle": 0, + "content": "Single image super-resolution (SR) is designed to reconstruct a high-resolution (HR) image from a single low-resolution (LR) image, typically affected by blurring and down-sampling. The standard degradation model in traditional SR, bicubic down-sampling, allows for consistent benchmarks and systematic comparisons among different SR methods. This framework also serves as a platform to highlight the advances in SR technologies. SR techniques are widely used in fields such as satellite imaging, medical image enhancement, and surveillance, where improved image quality is essential for accurate interpretation and analysis." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.297, + 0.483, + 0.493 + ], + "angle": 0, + "content": "State-of-the-art deep neural networks for image superresolution (SR) often suffer from overparameterization, intensive computation, and high latency, making their deployment on mobile devices for real-time SR applications challenging. To address these limitations, extensive research has focused on improving network efficiency through techniques such as network pruning, low-rank filter decomposition, network quantization, neural architecture search, state space modeling, diffusion priors, and knowledge distillation [76, 79, 89, 90, 129, 143, 146, 148]. 
These compression methods, successfully applied to image SR, optimize both the computational footprint and the operational speed [8, 91, 123]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.493, + 0.483, + 0.719 + ], + "angle": 0, + "content": "Efficient SR is particularly crucial for edge computing and mobile devices, where processing power, energy availability, and memory are limited. The enhanced efficiency of SR models ensures that these devices can execute high-quality image processing in real-time without exhausting system resources or draining battery life rapidly. Metrics like runtime, parameter count, and computational complexity (FLOPs) are vital for assessing the suitability of SR models for edge deployment. These parameters are key in maintaining a balance between performance and resource use, ensuring that mobile devices can deliver advanced imaging capabilities efficiently. This balance is critical for the widespread adoption of advanced SR techniques in everyday applications, driving the development of AI-enabled technologies that are both powerful and accessible." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.72, + 0.483, + 0.901 + ], + "angle": 0, + "content": "In collaboration with the 2025 New Trends in Image Restoration and Enhancement (NTIRE 2025) workshop, we organize the challenge on single-image efficient superresolution. The challenge's goal is to super-resolve an LR image with a magnification factor of \\(\\times 4\\) using a network that reduces aspects such as runtime, parameters, FLOPs, of EFDN [116], while at least maintaining the \\(26.90~\\mathrm{dB}\\) on the DIV2K_LSDIR_valid dataset, and \\(26.99\\mathrm{dB}\\) on the DIV2K_LSDIR_test dataset. This challenge aims to discover advanced and innovative solutions for efficient SR, benchmark their efficiency, and identify general trends for the design of future efficient SR networks." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.364 + ], + "angle": 0, + "content": "This challenge is one of the NTIRE 2025 Workshop associated challenges on: ambient lighting normalization [106], reflection removal in the wild [125], shadow removal [105], event-based image deblurring [97], image denoising [98], XGC quality assessment [74], UGC video enhancement [93], night photography rendering [28], image super-resolution (x4) [12], real-world face restoration [13], efficient super-resolution [92], HR depth estimation [130], efficient burst HDR and restoration [58], cross-domain few-shot object detection [29], short-form UGC video quality assessment and enhancement [62, 63], text to image generation model quality assessment [36], day and night rain-drop removal for dual-focused images [61], video quality assessment for video conferencing [47], low light image enhancement [75], light field super-resolution [121], restore any image model (RAIM) in the wild [68], raw restoration and super-resolution [16] and raw reconstruction from RGB on smartphones [17]." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.389, + 0.907, + 0.426 + ], + "angle": 0, + "content": "2. NTIRE 2025 Efficient Super-Resolution Challenge" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.436, + 0.907, + 0.543 + ], + "angle": 0, + "content": "The goals of this challenge include: (i) promoting research in the area of single-imae efficient super-resolution, (ii) facilitating comparisons between the efficiency of various methods, and (iii) providing a platform for academic and industrial participants to engage, discuss, and potentially establish collaborations. This section delves into the specifics of the challenge." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.564, + 0.61, + 0.579 + ], + "angle": 0, + "content": "2.1. 
Dataset" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.59, + 0.905, + 0.863 + ], + "angle": 0, + "content": "The DIV2K [4] dataset and LSDIR [64] dataset are utilized for this challenge. The DIV2K dataset consists of 1,000 diverse 2K resolution RGB images, which are split into a training set of 800 images, a validation set of 100 images, and a test set of 100 images. The LSDIR dataset contains 86,991 high-resolution high-quality images, which are split into a training set of 84,991 images, a validation set of 1,000 images, and a test set of 1,000 images. In this challenge, the corresponding LR DIV2K images are generated by bicubic downsampling with a down-scaling factor of \\(4 \\times\\). The training images from DIV2K and LSDIR are provided to the participants of the challenge. During the validation phase, 100 images from the DIV2K validation set and 100 images from the LSDIR validation set are made available to participants. During the test phase, 100 images from the DIV2K test set and another 100 images from the LSDIR test set are used. Throughout the entire challenge, the testing HR images remain hidden from the participants." + }, + { + "type": "page_footnote", + "bbox": [ + 0.538, + 0.888, + 0.8, + 0.9 + ], + "angle": 0, + "content": "https://www.cvlai.net/ntire/2025/" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.091, + 0.091, + 0.3, + 0.106 + ], + "angle": 0, + "content": "2.2. EFDN Baseline Model" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.114, + 0.484, + 0.205 + ], + "angle": 0, + "content": "The Edge-Enhanced Feature Distillation Network (EFDN) [116] serves as the baseline model in this challenge. The aim is to improve its efficiency in terms of runtime, number of parameters, and FLOPs, while at least maintaining 26.90 dB on the DIV2K_LSDIR_valid dataset and 26.99 dB on the DIV2K_LSDIR_test dataset." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.206, + 0.484, + 0.476 + ], + "angle": 0, + "content": "The main idea within EFDN is a combination of block composing, architecture searching, and loss designing to obtain a trade-off between performance and lightweighting. Especially, For block composing, EFDN sum up the re-parameterization methods [20, 21, 138] and designs a more effective and complex edge-enhanced diverse branch block. In detail, they employ several reasonable reparameterizable branches to enhance the structural information extraction, and then they integrate them into a vanilla convolution to maintain the inference performance. To ensure the effective optimization of parallel branches in EDBB, they designed an edge-enhanced gradient-variance loss (EG) based on the gradient-variance loss [1]. The proposed loss enforces minimizing the difference between the computed variance maps, which is helpful to restore sharper edges. The gradient maps calculated by different filters and the corresponding EG loss. In addition, the NAS strategy of DLSR is adopted to search for a robust backbone." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.479, + 0.484, + 0.673 + ], + "angle": 0, + "content": "The baseline EFDN emerges as the 1st place for the overall performance of the NTIRE2023 Efficient SR Challenge [116]. The quantitative performance and efficiency metrics of EFDN are summarized as follows: (1) The number of parameters is \\(0.276\\mathrm{M}\\). (2) The average PSNRs on validation (DIV2K 100 valid images and LSDIR 100 valid images) and testing (DIV2K 100 test images and LSDIR 100 test images) sets of this challenge are 26.93 dB and 27.01 dB, respectively. (3) The runtime averaged to 22.18ms on the validation and test set with PyTorch \\(2.0.0 + \\mathrm{cu}118\\), and a single NVIDIA RTX A6000 GPU. (4) The number of FLOPs for an input of size \\(256\\times 256\\) is \\(16.70\\mathrm{G}\\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.091, + 0.687, + 0.315, + 0.703 + ], + "angle": 0, + "content": "2.3. Tracks and Competition" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.71, + 0.483, + 0.785 + ], + "angle": 0, + "content": "The aim of this challenge is to devise a network that reduces one or several aspects such as runtime, parameters, and FLOPs, while at least maintaining the 26.90 dB on the DIV2K_LSDIR valid dataset, and 26.99 dB on the DIV2K_LSDIR test dataset." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.795, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Challenge phases: (1) Development and validation phase: Participants were given access to 800 LR/HR training image pairs and 200 LR/HR validation image pairs from the DIV2K and the LSDIR datasets. An additional 84,991 LR/HR training image pairs from the LSDIR dataset are also provided to the participants. The EFDN model, pretrained parameters, and validation demo script are available" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.907, + 0.334 + ], + "angle": 0, + "content": "on GitHub https://github.com/Amazingren/NTIRE2025_ESR, allowing participants to benchmark their models' runtime on their systems. Participants could upload their HR validation results to the evaluation server to calculate the PSNR of the super-resolved image produced by their models and receive immediate feedback. The corresponding number of parameters, FLOPs, and runtime will be computed by the participants. (2) Testing phase: In the final test phase, participants were granted access to 100 LR testing images from DIV2K and 100 LR testing images from LSDIR, while the HR ground-truth images remained hidden. Participants submitted their super-resolved results to the Codalab evaluation server and emailed the code and factsheet to the organizers. The organizers verified and ran the provided code to obtain the final results, which were then shared with participants at the end of the challenge." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.351, + 0.909, + 0.699 + ], + "angle": 0, + "content": "Evaluation protocol: Quantitative evaluation metrics included validation and testing PSNRs, runtime, FLOPs, and the number of parameters during inference. PSNR was measured by discarding a 4-pixel boundary around the images. The average runtime during inference was computed on the 200 LR validation images and the 200 LR testing images. The average runtime on the validation and testing sets served as the final runtime indicator. FLOPs are evaluated on an input image of size \\(256 \\times 256\\). Among these metrics, runtime was considered the most important. Participants were required to maintain a PSNR of at least 26.90 dB on the DIV2K_LSDIR valid dataset, and 26.99 dB on the DIV2K_LSDIR test dataset during the challenge. The constraint on the testing set helped prevent overfitting on the validation set. It's important to highlight that methods with a PSNR below the specified threshold (i.e., 26.90 dB on DIV2K_LSDIR_valid and, 26.99 dB on DIV2K_LSDIR_test) will not be considered for the subsequent ranking process. It is essential to meet the minimum PSNR requirement to be eligible for further evaluation and ranking. A code example for calculating these metrics is available at https://github.com/Amazingren/NTIRE2025_ESR." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.702, + 0.909, + 0.822 + ], + "angle": 0, + "content": "To better quantify the rankings, we followed the scoring function from NTIRE2024 ESR [91] for three evaluation metrics in this challenge: runtime, FLOPs, and parameters. This scoring aims to convert the performance of each metric into corresponding scores to make the rankings more significant. 
Especially, the score for each separate metric (i.e., Runtime, FLOPs, and parameter) for each sub-track is calculated as:" + }, + { + "type": "equation", + "bbox": [ + 0.56, + 0.838, + 0.907, + 0.871 + ], + "angle": 0, + "content": "\\[\n\\text {S c o r e} _ {\\text {M e t r i c}} = \\frac {\\operatorname {E x p} (2 \\times \\operatorname {M e t r i c} _ {\\text {T e a m X}})}{\\operatorname {M e t r i c} _ {\\text {B a s e l i n e}}}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.886, + 0.907, + 0.901 + ], + "angle": 0, + "content": "based on the score of each metric, the final score used for" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.092, + 0.296, + 0.105 + ], + "angle": 0, + "content": "the main track is calculated as:" + }, + { + "type": "equation", + "bbox": [ + 0.154, + 0.111, + 0.482, + 0.164 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\text {S c o r e} = w _ {1} \\times \\text {S c o r e} \\\\ + w _ {2} \\times S c o r e \\_ F L O P s \\tag {2} \\\\ + w _ {3} \\times S c o r e \\_ P a r a m s, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.17, + 0.483, + 0.232 + ], + "angle": 0, + "content": "where \\( w_{1}, w_{2} \\), and \\( w_{3} \\) are set to 0.7, 0.15, and 0.15, respectively. This setting is intended to incentivize participants to design a method that prioritizes speed efficiency while maintaining a reasonable model complexity." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.242, + 0.266, + 0.259 + ], + "angle": 0, + "content": "3. Challenge Results" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.267, + 0.483, + 0.448 + ], + "angle": 0, + "content": "The final challenge results and the corresponding rankings are presented in Tab. 1 The table also includes the baseline method EFDN [116] for comparison. In Sec.4, the methods evaluated in Tab. 
1 are briefly explained, while the team members are listed in A. The performance of different methods is compared from four different perspectives, including the runtime, FLOPs, the parameters, and the overall performance. Furthermore, in order to promote a fair competition emphasizing efficiency, the criteria for image reconstruction quality in terms of test PSNR are set to 26.90 and 26.99 on the DIV2K_LSDIR_valid and DIV2K_LSDIR_test sets, respectively." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.449, + 0.483, + 0.78 + ], + "angle": 0, + "content": "Runtime. In this challenge, runtime stands as the paramount evaluation metric. ShannonLab's solution emerges as the frontrunner with the shortest runtime among all entries in the efficient SR challenge, securing its top-3 ranking position. Following closely, the TSSR and mbga claim the second and third spots, respectively. Remarkably, the average runtime of the top three solutions on both the validation and test sets remains below \\(10\\mathrm{ms}\\). Impressively, the first 13 teams present solutions with an average runtime below \\(16\\mathrm{ms}\\), showcasing a continuous enhancement in the efficiency of image SR networks. Despite the slight differences in runtime among the top three teams, the challenge retains its competitive edge. An additional distinction from previous challenges worth noting is that this year, runtime performance no longer predominantly dictates the overall rankings as it has in the past, where the top three solutions in terms of runtime were also the top performers in the main track (e.g., from NTIRE ESR 2024 [91]). This shift indicates that participants are now emphasizing a more balanced approach, focusing not only on runtime optimization but also on improving the comprehensive performance of their models" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.781, + 0.483, + 0.902 + ], + "angle": 0, + "content": "Parameters. 
Model complexity was further evaluated by considering the number of parameters, as detailed in Table 1. In this sub-track, VEPG_C achieved the top position with only 0.044M parameters, closely followed by HannahSR and XUPTBoys with 0.060M and 0.072M parameters, respectively. The minimal disparity among the top three methods highlights their competitive edge and efficiency in managing model complexity. They were scored" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.152 + ], + "angle": 0, + "content": "at 1.38, 1.54, and 1.68, respectively, indicating a tight competition. However, it is noteworthy that these models also exhibited relatively high runtimes, suggesting an area for potential improvement in future iterations." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.154, + 0.907, + 0.639 + ], + "angle": 0, + "content": "FLOPs. The number of floating-point operations (FLOPs) is another critical metric for assessing model complexity. Within this sub-track, VEPG_C, XUPTBoys, and HannahSR secured the top three positions with FLOPs of 3.13G, 3.39G, and 3.75G, respectively. The competitiveness of this sub-track is further confirmed by the close scores of 1.45, 1.50, and 1.57, aligned with the parameter evaluation results. Remarkably, the same models top both the parameters and FLOPs evaluations, demonstrating consistent performance across different complexity metrics. Similar to the parameters sub-track, the extended runtimes of these methods point to a need for further research and optimization. Key implications include: i) Efficiency vs. Performance Trade-off: The close competition among the top models in terms of parameters and FLOPs suggests a significant trade-off between model efficiency and performance. Despite achieving minimal parameter counts and FLOPs, the high runtimes indicate that these models might be optimizing computational complexity at the expense of execution speed. 
This raises important considerations for future research in balancing efficiency with real-world usability, especially in applications where inference speed is critical. ii) Potential for Model Optimization: The consistency in ranking between the parameters and FLOPs sub-tracks reveals that models which are optimized for one aspect of computational efficiency tend to perform well in others. However, the noted high runtimes across these models suggest an untapped potential for holistic model optimization. Future work could focus on integrating more advanced optimization techniques or exploring novel architectural innovations to enhance both the computational efficiency and runtime performance." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.642, + 0.906, + 0.779 + ], + "angle": 0, + "content": "Overall Evaluation. The final assessment of performance employs a comprehensive metric that synthesizes runtime, FLOPs, and the number of parameters into a unified score. In this rigorous evaluation, the EMSR Group excelled, claiming the prestigious top position, followed by XiaomiMM (the winner of the NTIRE ESR 2024 challenge) and ShannonLab in second and third places, respectively. This achievement highlights the sophisticated engineering and innovative approaches implemented by these groups." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.781, + 0.909, + 0.902 + ], + "angle": 0, + "content": "Contrasting with the previous year, where runtime heavily influenced overall rankings, this year presents a shift. The best performer in runtime only secured third place in the overall competition. Specifically, EMSR, the overall winner, ranked fifth in runtime, sixth in parameters, and seventh in FLOPs. Similarly, XiaomiMM, which came second overall, was fourth in runtime, eleventh in parameters, and thirteenth in FLOPs. 
This demonstrates that: i) A balanced" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.089, + 0.907, + 0.201 + ], + "angle": 0, + "content": "Table 1. Results of Ninth NTIRE 2025 Efficient SR Challenge. The performance of the solutions is compared thoroughly from three perspectives including the runtime, FLOPs, and the number of parameters. The underscript numbers associated with each metric score denote the ranking of the solution in terms of that metric. For runtime, “Val.” is the runtime averaged on DIV2K_LSDIR_valid validation set. “Test” is the runtime averaged on a test set with 200 images from DIV2K_LSDIR_test set, respectively. “Ave.” is averaged on the validation and test datasets. “#Params” is the total number of parameters of a model. “FLOPs” denotes the floating point operations. Main Track combines all three evaluation metrics. The ranking for the main track is based on the score calculated via Eq. 2, and the ranking for other sub-tracks is based on the score of each metric via Eq. 1. Please note that this is not a challenge for PSNR improvement. The “validation/testing PSNR” is not ranked. For all the scores, the lower, the better." + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.213, + 0.907, + 0.641 + ], + "angle": 0, + "content": "
TeamsPSNR [dB]Runtime [ms]#Params [M]FLOPs [G]Sub-Track ScoresMain-Track
Val.TestVal.TestAve.Runtime#ParamsFLOPsOverall ScoreRanking
EMSR26.9226.9910.2689.7209.9940.1318.542.46(5)2.58(6)2.78(7)2.531
XiaomiMM26.9227.009.9589.1329.5450.1489.682.36(4)2.92(11)3.19(13)2.572
ShannonLab26.9027.008.9388.3028.6200.17211.232.18(1)3.48(17)3.84(18)2.623
TSSR26.9027.029.8128.8989.3550.16410.692.32(2)3.28(15)3.60(16)2.664
Davinci26.9227.0011.4269.87610.6510.1469.552.61(6)2.88(9)3.14(11)2.735
SRCB26.9227.0011.4129.96010.6860.1469.552.62(7)2.88(9)3.14(11)2.746
Rochester26.9427.0111.93410.45411.1940.15810.302.74(8)3.14(14)3.43(14)2.917
mbga26.9027.009.8229.2089.5150.19212.562.36(3)4.02(19)4.50(20)2.938
IESR26.9026.9913.76012.58213.1710.1438.323.28(10)2.82(7)2.71(6)3.129
ASR26.9027.0013.86411.98412.9240.1549.063.21(9)3.05(12)2.96(8)3.1510
VPEG_O26.9026.9916.35613.92615.1410.1459.423.92(12)2.86(8)3.09(9)3.6311
mmSR26.9527.0514.45012.03613.2430.21213.853.30(11)4.65(21)5.25(23)3.8012
ChanSR26.9227.0316.73815.59216.1650.21011.594.29(16)4.58(20)4.01(19)4.2913
Pixel Alchemists26.9027.0117.32214.60815.9650.21312.934.22(14)4.68(22)4.70(21)4.3614
MiSR26.9027.0217.05614.98816.0220.21313.864.24(15)4.68(22)5.26(24)4.4615
LZ26.9027.0116.98015.45016.2150.25216.424.31(17)6.21(25)7.15(25)5.0216
Z626.9026.9920.36216.18418.2730.30318.705.19(20)8.99(27)9.39(27)6.3917
TACO_SR26.9427.0517.82815.65216.7400.34220.034.52(18)11.92(30)11.01(30)6.6118
AIOT_AI26.9027.0019.83618.15818.9970.30119.565.54(21)8.86(26)10.41(28)6.7719
JNU62026.9027.0120.68818.28219.4850.32520.315.79(22)10.54(29)11.39(31)7.3420
LVGroup_HFUT26.9627.0716.39414.87615.6350.42627.874.09(13)21.91(33)28.15(34)10.3821
SVM26.9227.0430.61028.13429.3720.25113.3914.13(23)6.16(24)4.97(22)11.5622
YG26.9227.0433.65831.61432.6360.0935.8218.96(24)1.96(5)2.01(5)13.8723
NanoSR26.9727.0817.93016.30017.1150.55136.024.68(19)54.20(35)74.72(35)22.6124
MegastudyEdu Vision AI27.0127.1339.37637.52838.4520.16910.6332.03(25)3.40(16)3.57(15)23.4725
XUPTBoys26.9127.0350.56435.01242.7880.0723.3947.36(26)1.68(3)1.50(2)33.6326
MILA26.9027.0244.36242.03443.1980.0874.9349.14(27)1.88(4)1.80(4)34.9527
AiMF_SR26.9827.1046.59443.09244.8430.1809.4857.00(28)3.69(18)3.11(10)40.9228
EagleSR27.0427.1647.73045.19246.4610.35221.8965.95(29)12.82(31)13.76(32)50.1529
BVIVSR26.9726.9949.48846.79848.1430.15510.7976.75(30)3.07(13)3.64(17)54.7330
HannahSR26.9027.0258.28641.42249.8540.0603.7589.55(31)1.54(2)1.57(3)63.1531
VPEG_C26.9027.0060.04640.95050.4980.0443.1394.90(32)1.38(1)1.45(1)66.8632
CUIT_HT27.0927.2062.03859.10660.5720.30919.75235.36(33)9.39(28)10.65(29)167.7633
GXZY AI27.0127.13102.92499.102101.0130.42825.889.02e3(34)22.23(34)22.18(33)6.32e334
SCMSR26.9227.00133.866114.088123.9770.39317.627.15e4(35)17.25(32)8.25(26)5.01e435
IPCV27.2727.40366.924357.268362.0960.86665.661.51e14(36)531.32(37)2.60e3(36)1.05e1436
X-L27.0727.21525.966479.346502.6560.96670.834.81e19(37)1.10e3(38)4.83e3(37)3.36e1937
Quantum Res27.2927.40574.632558.934566.7830.79076.091.56e22(38)306.32(36)9.07e3(38)1.09e2238
The following methods are not ranked since their validation/testing PSNR (underlined) is not on par with the threshold.
SylabSR24.3624.4628.58024.82626.7030.0727.9011.111.682.588.41-
NJUPCA26.7026.8070.20252.93261.5672.30830.11257.451.83e736.822.75e6-
DepthIBN26.5626.6639.15436.87638.0150.1217.7130.802.402.5222.30-
Cidaut AI26.8626.9527.22024.97426.0970.21012.8310.524.584.658.75-
IVL26.6626.7618.74616.94417.8450.24015.645.005.696.515.33-
Baseline26.9327.0123.91220.45422.1830.27616.77.397.397.397.39-
" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.667, + 0.482, + 0.803 + ], + "angle": 0, + "content": "approach to model design, optimizing across multiple metrics rather than focusing on a single aspect, is becoming crucial in competitive evaluations. ii) Achieving top performance in one metric does not guarantee similar success in overall rankings, underscoring the complexity of model optimization in real-world scenarios. This year's goal was to encourage a balanced pursuit of speed and efficiency, a challenge that has evidently led to significant innovations and advancements in model design." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.81, + 0.483, + 0.902 + ], + "angle": 0, + "content": "PSNR. Team Quantum Res, IPCV, X-L, and CUIT_HTT demonstrate superior PSNR values, a critical evaluation metric in super-resolution. Specifically, Quantum Res and IPCV lead with an exceptional 27.40 dB, closely followed by X-L with 27.21 dB, and CUIT_HTT at 27.20 dB on the DIV2K_LSDIR_test set. Despite these impressive perfor" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.667, + 0.907, + 0.895 + ], + "angle": 0, + "content": "mances, it is essential to emphasize that the primary focus of this challenge is on efficiency in super-resolution. Accordingly, we have adjusted the PSNR criteria, setting rigorous lower thresholds of 26.90 dB and 26.99 dB for the DIV2K_LSDIR_valid and DIV2K_LSDIR_test sets, respectively. This adjustment is designed to prioritize a balance between high performance and computational efficiency. A commendable total of 38 teams met this adjusted benchmark, demonstrating their capability to effectively balance image quality with efficiency. However, teams like IVL, Cidaut AI, SylabSR DepthIB, and NJUPCA, while notable for their efficiency, did not achieve the required PSNR levels. 
This highlights the ongoing challenge of optimizing super-resolution processes that meet both efficiency and performance standards, underscoring the complex nature of" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.092, + 0.092, + 0.27, + 0.105 + ], + "angle": 0, + "content": "advancements in this field." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.134, + 0.216, + 0.149 + ], + "angle": 0, + "content": "3.1. Main Ideas" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.162, + 0.483, + 0.254 + ], + "angle": 0, + "content": "Throughout this challenge, several techniques have been proposed to enhance the efficiency of deep neural networks for image super-resolution (SR) while striving to maintain optimal performance. The choice of techniques largely depends on the specific metrics that a team aims to optimize. Below, we outline some typical ideas that have emerged:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.267, + 0.483, + 0.373 + ], + "angle": 0, + "content": "- Distillation is an effective manner to maintain the PSNR performance without increasing computation cost during inference. The team EMSR added only the ConvLora-Like [7] operation into the base model. Similarly, team ESPAN also proposed to use the self-distillation for progressive learning strategy validated from [42]." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.373, + 0.483, + 0.509 + ], + "angle": 0, + "content": "- Re-parameterization [22] [24, 126] is commonly used in this challenge. Usually, a normal convolutional layer with multiple basic operations (\\(3 \\times 3\\) convolution, \\(1 \\times 1\\) operation, first and second-order derivative operators, skip connections) is parameterized during training. During inference, the multiple operations that reparameterize a convolution could be merged back into a single convolution. 
e.g., Some top teams (i.e., XiaomiMM, mmSR, HannahSR, etc) used this operation in their methods." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.51, + 0.483, + 0.6 + ], + "angle": 0, + "content": "- Parameter-free attention mechanism is validated as a useful technique to enhance computational efficiency [24, 126]. Specifically, XiaomiMM proposed a swift parameter-free attention network based on parameter-free attention, which achieves the lowest runtime while maintaining a decent PSNR performance." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.6, + 0.483, + 0.69 + ], + "angle": 0, + "content": "- Incorporating multi-scale information and hierarchical module design are proven strategies for effectively fusing critical information. For instance, solutions such as HannahSR, XuPTBoys, and ChanSR have successfully utilized multi-scale residual connections and hierarchical module designs to enhance their performance." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.69, + 0.483, + 0.765 + ], + "angle": 0, + "content": "- Network pruning plays an important role. It is observed that a couple of teams (i.e., ASR, Davinci) used network pruning techniques to slightly compress a network. This leads to a more lightweight architecture without a heavy performance drop." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.766, + 0.483, + 0.841 + ], + "angle": 0, + "content": "- Exploration with new network architectures is conducted. Besides the common CNN or Transformers, the state space model (i.e., vision mamba [30, 32]) was tried by GXZY.AI in this challenge, which was also validated in the last NTIRE ESR challenge [91]." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.841, + 0.483, + 0.902 + ], + "angle": 0, + "content": "- Various other techniques are also attempted. Some teams also proposed solutions based on neural architecture search, vision transformers, frequency processing, multi-stage design, and advanced training strategies." 
+ }, + { + "type": "list", + "bbox": [ + 0.091, + 0.267, + 0.483, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.091, + 0.616, + 0.106 + ], + "angle": 0, + "content": "3.2. Fairness" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.113, + 0.907, + 0.401 + ], + "angle": 0, + "content": "To ensure the integrity and fairness of the Efficient SR Challenge, we meticulously established a set of rules focusing on the permissible datasets for training the models. Participants were allowed to augment their training with external datasets, such as Flickr2K, to promote diverse and comprehensive model training experiences. However, to guarantee an unbiased evaluation, the use of additional DIV2K and LSDIR validation sets, which include both high-resolution (HR) and low-resolution (LR) images, was explicitly prohibited during the training phase. This restriction aimed to maintain the validation set's integrity as a vital benchmark for assessing the proposed networks' performance and generalizability. Moreover, using LR images from the DIV2K and LSDIR test sets for training was strictly forbidden, ensuring the test dataset's purity and upholding the evaluation process's integrity. Lastly, the adoption of advanced data augmentation techniques during training was encouraged as a fair practice, allowing participants to enhance their models within the defined rules and guidelines." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.41, + 0.644, + 0.424 + ], + "angle": 0, + "content": "3.3. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.432, + 0.906, + 0.462 + ], + "angle": 0, + "content": "The analysis of the submissions to this year's Efficient SR Challenge allows us to draw several important conclusions:" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.463, + 0.905, + 0.568 + ], + "angle": 0, + "content": "- Firstly, the competition within the image super-resolution (SR) community remains intense. 
This year, the challenge attracted 244 registered participants, with 43 teams making valid submissions. All proposed methods have enhanced the state-of-the-art in efficient SR. Notably, the competition among the top three teams has intensified, with last year's winner ranking second this year." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.568, + 0.906, + 0.629 + ], + "angle": 0, + "content": "- Secondly, unlike in previous challenges, dominance in runtime no longer characterizes the top-ranking teams. Instead, more balanced solutions that consider all aspects of performance are proving to be more beneficial." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.629, + 0.905, + 0.69 + ], + "angle": 0, + "content": "- Thirdly, consistent with the success of deep learning techniques like DeepSeek, the distillation approach has significantly contributed to performance improvements without adding computational complexity." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.69, + 0.905, + 0.75 + ], + "angle": 0, + "content": "- Fourthly, re-parameterization and network compression have emerged as crucial techniques in enhancing efficiency in SR. Ongoing exploration in these areas is encouraged to further boost efficiency." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.75, + 0.905, + 0.84 + ], + "angle": 0, + "content": "- Fifthly, the use of large-scale datasets, such as the one described in [64], for pre-training has been shown to enhance accuracy significantly. Typically, training incorporates multiple phases, gradually increasing the patch size and decreasing the learning rate, optimizing the training process." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.905, + 0.885 + ], + "angle": 0, + "content": "- Sixthly, this year's challenge saw the introduction of the state space model, presenting a novel approach that may influence future research directions in the field." 
+ }, + { + "type": "list", + "bbox": [ + 0.512, + 0.463, + 0.906, + 0.885 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.886, + 0.905, + 0.902 + ], + "angle": 0, + "content": "Overall, by considering factors like runtime, FLOPs," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.505, + 0.937 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.485, + 0.244 + ], + "angle": 0, + "content": "and parameter count simultaneously, it is feasible to design models that optimize across multiple evaluation metrics. Finally, as computational capabilities continue to evolve, the focus on optimizing models for runtime, FLOPs, and parameter efficiency becomes increasingly vital. With advancements in both hardware and software, we expect the development of more sophisticated and efficient models in the super-resolution domain. The pursuit of efficiency in SR is likely to remain a key driver of innovation, promising exciting advancements and continual progress in the field." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.26, + 0.373, + 0.277 + ], + "angle": 0, + "content": "4. Challenge Methods and Teams" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.286, + 0.181, + 0.3 + ], + "angle": 0, + "content": "4.1. EMSR" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.308, + 0.484, + 0.445 + ], + "angle": 0, + "content": "Method. The overall architecture of the team EMSR is shown in Fig. 1, which is based on the leading efficient super-resolution method SPAN [112]. Inspired by ConvLora [7], the team proposes SconvLB, which incorporates ConvLora into SPAB to improve performance without increasing computation complexity. 
Specifically, given a pre-trained convolutional layer in SPAB, they update it by adding Lora layers, and representing it with a low-rank decomposition:" + }, + { + "type": "equation", + "bbox": [ + 0.193, + 0.459, + 0.483, + 0.475 + ], + "angle": 0, + "content": "\\[\nW _ {\\text {C o n v L o r a}} = W _ {P T} + X Y, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.489, + 0.484, + 0.595 + ], + "angle": 0, + "content": "where \\( W_{ConvLora} \\) denotes the updated weight parameters of the convolution, \\( W_{PT} \\) denotes the original pre-trained parameters of the convolution, \\( X \\) is initialized by random Gaussian distribution, and \\( Y \\) is zero in the beginning of training. Note that the Lora weights can be merged into the main backbone. Therefore, ConvLoras don't introduce extra computation during inference." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.596, + 0.484, + 0.687 + ], + "angle": 0, + "content": "They adopt the pre-trained SPAN-Tiny model [112] with 26 channels. They replace the SPAB in SPAN with our proposed SconvLB, and also add ConvLora into the pixel shuffle block and the convolution before it. During training, they freeze the original weight and bias of the convolution and only update the Lora parameters." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.688, + 0.484, + 0.853 + ], + "angle": 0, + "content": "Optimization. To supervise the optimization of SconvLB, they adopt a knowledge-based distillation training strategy. They adopt spatial affinity-based knowledge distillation [37] to transfer second-order statistical info from the teacher model to the student model by aligning spatial feature affinity matrices at multiple layers of the networks. Given a feature \\( F_{l} \\in R^{B \\times C \\times W \\times H} \\) extracted from the \\( l \\)-th layer of the network, they first flatten the tensor along the last two dimensions and calculate the affinity matrix \\( A_{\\text{spatial}} \\). 
Then the spatial feature affinity-based distillation loss can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.183, + 0.865, + 0.483, + 0.904 + ], + "angle": 0, + "content": "\\[\nL _ {A D} = \\frac {1}{| A |} \\sum_ {l = 1} ^ {n} \\left\\| A _ {l} ^ {S} - A _ {l} ^ {T} \\right\\| _ {1}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.091, + 0.905, + 0.166 + ], + "angle": 0, + "content": "where \\( A_{l}^{S} \\) and \\( A_{l}^{T} \\) are the spatial affinity matrices of student and teacher networks extracted from the feature maps of the \\( l \\)-th layer, respectively. \\( |A| \\) denotes the number of elements in the affinity matrix. Specifically, the team applies the distillation loss after each SconvLB." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.168, + 0.905, + 0.199 + ], + "angle": 0, + "content": "Except for the distillation loss in the feature space, the team applies a pixel-level distillation loss:" + }, + { + "type": "equation", + "bbox": [ + 0.604, + 0.21, + 0.905, + 0.228 + ], + "angle": 0, + "content": "\\[\nL _ {T S} = \\left\\| \\mathcal {T} \\left(I _ {L R}\\right) - \\mathcal {S} \\left(I _ {L R}\\right) \\right\\| _ {1}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.24, + 0.905, + 0.269 + ], + "angle": 0, + "content": "where \\(\\mathcal{T}\\) and \\(\\mathcal{S}\\) denote the teacher network and the student network, respectively. \\(I_{LR}\\) denotes the LR image." 
+ }, + { + "type": "text", + "bbox": [ + 0.534, + 0.27, + 0.724, + 0.285 + ], + "angle": 0, + "content": "They also apply the \\(L_{2}\\) loss:" + }, + { + "type": "equation", + "bbox": [ + 0.617, + 0.297, + 0.905, + 0.315 + ], + "angle": 0, + "content": "\\[\nL _ {r e c} = \\left\\| I _ {H R} - \\mathcal {S} \\left(I _ {L R}\\right) \\right\\| _ {2} ^ {2}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.327, + 0.905, + 0.356 + ], + "angle": 0, + "content": "where \\( I_{HR} \\) denotes the ground truth high-resolution image. The overall loss is:" + }, + { + "type": "equation", + "bbox": [ + 0.583, + 0.37, + 0.905, + 0.387 + ], + "angle": 0, + "content": "\\[\nL _ {t o t a l} = \\lambda_ {1} L _ {r e c} + \\lambda_ {2} L _ {T S} + \\lambda_ {3} L _ {A D}. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.399, + 0.905, + 0.459 + ], + "angle": 0, + "content": "Training Details. The team uses DIV2K and LSDIR for training. Random flipping and random rotation are used for data augmentation. The training process is divided into two stages." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.46, + 0.905, + 0.535 + ], + "angle": 0, + "content": "1. Stage One: HR patches of size \\( 192 \\times 192 \\) are randomly cropped from HR images, and the mini-batch size is set to 8. The model is trained by minimizing the \\( L_{total} \\) mentioned above with the Adam optimizer. The learning rate is \\( 1 \\times 10^{-4} \\). A total of \\( 30k \\) iterations are trained." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.536, + 0.905, + 0.581 + ], + "angle": 0, + "content": "2. Stage Two: In the second stage, the team increases the size of the HR image patches to \\( 256 \\times 256 \\), with other settings remaining the same as in the first stage." 
+ }, + { + "type": "list", + "bbox": [ + 0.514, + 0.46, + 0.905, + 0.581 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.582, + 0.905, + 0.627 + ], + "angle": 0, + "content": "Throughout the entire training process, they employ an Exponential Moving Average (EMA) strategy to enhance the robustness of training." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.637, + 0.64, + 0.652 + ], + "angle": 0, + "content": "4.2. XiaomiMM" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.659, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Method Details. The team proposes an accelerated variant of the Swift Parameter-free Attention Network (SPAN) [112], called SPANF, which is built upon the fundamental SPAB block. To enhance the inference speed, SPANF introduces several key modifications compared to the original SPAN model. Firstly, they remove the last SPAB block, which reduces computational complexity without significantly impacting performance. Secondly, they increase the number of channels to 32, providing a better balance between model capacity and speed. Thirdly, they replace the first convolution layer with a nearest neighbor upsampling operation, which is computationally less intensive and accelerates the upsampling process. Lastly, they implement simple modifications to the shortcut connections within the network to further streamline computations. These changes collectively enable SPANF to achieve faster" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.095, + 0.094, + 0.904, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.095, + 0.309, + 0.9, + 0.325 + ], + "angle": 0, + "content": "Figure 1. Team EMSR: The team incorporates ConvLoras into the network to increase the performance without adding extra complexity." 
+ }, + { + "type": "image", + "bbox": [ + 0.097, + 0.343, + 0.476, + 0.503 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.516, + 0.483, + 0.558 + ], + "angle": 0, + "content": "Figure 2. The proposed SPANF architecture. The main structure is basically the same as SPAN [112], but one SPAB module is reduced, and the number of channels is 32." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.595, + 0.483, + 0.699 + ], + "angle": 0, + "content": "inference speeds while maintaining competitive image quality. The evaluations on multiple benchmarks demonstrate that SPANF not only upholds the efficiency of SPAN's parameter-free attention mechanism but also offers superior speed, making it highly suitable for real-world applications, particularly in scenarios with limited computational resources." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.705, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Implementation Details. The dataset utilized for training comprises DIV2K and LSDIR. During each training batch, 64 HR RGB patches are cropped, measuring \\(256 \\times 256\\), and subjected to random flipping and rotation. The learning rate is initialized at \\(5 \\times 10^{-4}\\) and undergoes a halving process every \\(2 \\times 10^{5}\\) iterations. The network undergoes training for a total of \\(10^{6}\\) iterations, with the L1 loss function being minimized through the utilization of the Adam optimizer [54]. They repeated the aforementioned training settings four times after loading the trained weights. Subsequently, fine-tuning is executed using the L1 and L2 loss functions, with an initial learning rate of \\(1 \\times 10^{-5}\\) for \\(5 \\times 10^{5}\\) iterations, and HR patch size of 512. They con" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.339, + 0.907, + 0.398 + ], + "angle": 0, + "content": "ducted finetuning on four models utilizing both L1 and L2 losses, and employed batch sizes of 64 and 128. 
Finally, they integrated these models' parameters to obtain their ultimate model." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.409, + 0.65, + 0.423 + ], + "angle": 0, + "content": "4.3. ShannonLab" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.431, + 0.907, + 0.643 + ], + "angle": 0, + "content": "Method. The method proposed by the team draws inspiration from ECBSR and SPAN. First, they optimized the ECB module by introducing a 1x1 convolutional layer for channel expansion before the input tensor enters the ECB module. After processing, another 1x1 convolution restores the original channel dimensions, while incorporating residual connections. During inference, these components can be merged into a standard 3x3 convolution through reparameterization, thereby enhancing the ECB module's effectiveness without increasing computational overhead. As illustrated in Fig. 3, the complete model architecture of TSR comprises a shallow feature extraction convolution, a reconstruction convolution, a PixelShuffle module, and four REECB blocks which are made of stacked optimized ECBs." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.643, + 0.906, + 0.718 + ], + "angle": 0, + "content": "Training Details. The model is trained on the DIV2K and LSDIR train dataset with random flipping and rotation applied for data augmentation. The Adam optimizer is consistently employed throughout the training process. The entire training process is divided into five steps." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.72, + 0.905, + 0.793 + ], + "angle": 0, + "content": "1. HR patches of size \\(256 \\times 256\\) are randomly cropped from HR images, and the mini-batch size is set to 32. L1 loss is used and the initial learning rate is set to 5e-4, with a cosine learning rate decay strategy. The total iterations is 500k." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.795, + 0.905, + 0.869 + ], + "angle": 0, + "content": "2. 
HR patches of size \\( 256 \\times 256 \\) are randomly cropped from HR images, and the mini-batch size is set to 32. L1 and L2 loss is used and the initial learning rate is set to 5e-4, with a cosine learning rate decay strategy. The total iterations is 1000k." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.905, + 0.901 + ], + "angle": 0, + "content": "3. HR patches of size \\(512 \\times 512\\) are randomly cropped from HR images, and the mini-batch size is set to 64. L2" + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.72, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.099, + 0.094, + 0.48, + 0.14 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.135, + 0.153, + 0.438, + 0.168 + ], + "angle": 0, + "content": "Figure 3. Team ShannonLab: The pipeline of TSR." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.192, + 0.483, + 0.235 + ], + "angle": 0, + "content": "loss is used and the initial learning rate is set to 2e-4, with a cosine learning rate decay strategy. The total iterations is \\(1000\\mathrm{k}\\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.237, + 0.483, + 0.311 + ], + "angle": 0, + "content": "4. HR patches of size \\(512 \\times 512\\) are randomly cropped from HR images, and the mini-batch size is set to 64. L2 loss is used and the initial learning rate is set to 1e-4, with a cosine learning rate decay strategy. The total iterations is 1000k." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.312, + 0.483, + 0.386 + ], + "angle": 0, + "content": "5. HR patches of size \\( 512 \\times 512 \\) are randomly cropped from HR images, and the mini-batch size is set to 64. L2 loss is used and the initial learning rate is set to 1e-5, with a cosine learning rate decay strategy. The total iterations is 1000k." 
+ }, + { + "type": "list", + "bbox": [ + 0.09, + 0.237, + 0.483, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.395, + 0.173, + 0.41 + ], + "angle": 0, + "content": "4.4. TSSR" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.418, + 0.483, + 0.478 + ], + "angle": 0, + "content": "Method. They combined the ideas of reparameterization and attention mechanism to design a model that can capture image information in the network and effectively achieve image super-resolution." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.479, + 0.483, + 0.509 + ], + "angle": 0, + "content": "Training Details. The training process is divided into three steps." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.509, + 0.483, + 0.583 + ], + "angle": 0, + "content": "1. HR patches of size \\(256 \\times 256\\) are randomly cropped from HR images, and the mini-batch size is set to 64. L1 loss with AdamW optimizer is used and the initial learning rate is set to 0.0005 and halved at every 100k iterations. The total iterations is 500k." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.584, + 0.483, + 0.658 + ], + "angle": 0, + "content": "2. HR patches of size \\( 256 \\times 256 \\) are randomly cropped from HR images, and the mini-batch size is set to 64. L1 and L2 loss with AdamW optimizer is used and the initial learning rate is set to 0.0002 and halved at every 100k iterations. The total iterations is 1000k." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.66, + 0.483, + 0.735 + ], + "angle": 0, + "content": "3. HR patches of size \\(512 \\times 512\\) are randomly cropped from HR images, and the mini-batch size is set to 64. L2 loss with AdamW optimizer is used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 1000k." 
+ }, + { + "type": "list", + "bbox": [ + 0.09, + 0.509, + 0.483, + 0.735 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.743, + 0.172, + 0.759 + ], + "angle": 0, + "content": "4.5. mbga" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.765, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Architecture. The team proposes the ESPAN, which is based on SPAN [111]. Through evaluations of depth-channel combinations in SPAN on an A6000 GPU, they determined that setting the number of channels to 32 yields higher efficiency than 28 channels. To reduce parameters and FLOPs, a depth of 6 was adopted. Additionally, a \\(9 \\times 9\\) convolution replaced the conventional \\(3 \\times 3\\) convolution at the network's input stage since they find that \\(9 \\times 9\\) convolution is faster than \\(3 \\times 3\\) convolution on A6000." + }, + { + "type": "image", + "bbox": [ + 0.6, + 0.099, + 0.825, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.555, + 0.302, + 0.863, + 0.316 + ], + "angle": 0, + "content": "Figure 4. Team mbga: General Reparameterization." + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.334, + 0.907, + 0.436 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.555, + 0.445, + 0.863, + 0.46 + ], + "angle": 0, + "content": "Figure 5. Team mbga: ESPAN with self distillation." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.49, + 0.907, + 0.642 + ], + "angle": 0, + "content": "General Reparameterization. Inspired by MobileOne [107] and RepVGG [23], the team proposes a generalized reparameterization block (Fig. 4). The block consists of four \\(1 \\times 1 - 3 \\times 3\\) convolution branches, one \\(1 \\times 1\\) convolution branch, and one \\(3 \\times 3\\) convolution branch. Skip connections are omitted due to empirical observations of training instability. 
While additional duplicated branches or \\(3 \\times 3 - 1 \\times 1\\) convolution branches are feasible, the current configuration is found to offer superior performance consistency during optimization." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.643, + 0.909, + 0.868 + ], + "angle": 0, + "content": "Self distillation and progressive learning. Inspired by RIFE [42], self-distillation is incorporated into their training pipeline. The teacher model shares the identical backbone as the student model but includes three extra SPAB blocks appended to the student's backbone (Fig. 5). A self-distillation loss similar to RIFE's formulation is adopted to co-train the teacher and student networks. This design enables the teacher model to learn robust backbone features. After the distillation phase, the student loss and distillation loss components are removed, and the entire teacher model is fine-tuned. Leveraging the pre-trained robust teacher, progressive learning is employed: the extra SPAB blocks are gradually removed from the teacher's backbone, finally resulting in an architecture identical to the original student model." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.871, + 0.908, + 0.903 + ], + "angle": 0, + "content": "Frequency-Aware Loss. Since small models have limited parameters, during training, they should make the model fo" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.227 + ], + "angle": 0, + "content": "cus more on important (or difficult) areas. In their methods, two types of frequency-aware losses are employed. The first type is the DCT loss. They use the discrete cosine transform (DCT) to convert the RGB domain to the frequency domain and then apply the L1 loss to calculate the difference. The other type is the edge loss. 
They add a blur to the image and then subtract the blurred image from the original one to obtain the high frequency area. Subsequently, the L1 loss is calculated on this high frequency area." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.228, + 0.482, + 0.273 + ], + "angle": 0, + "content": "Training details: The training process contains two stages. And the training dataset is the DIV2K_LSDIR_train. General reparameterization is used on the whole process." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.273, + 0.482, + 0.303 + ], + "angle": 0, + "content": "I. At the first stage, they use self distillation to train the teacher model." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.306, + 0.483, + 0.456 + ], + "angle": 0, + "content": "- Step1. The team first trains a 2x super-resolution model. HR patches of size 256x256 are randomly cropped from HR images, and the mini-batch size is set to 64. L1 loss and self distillation loss with AdamW optimizer are used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 500k. This step is repeated twice. And then they follow the same training setting and use 2x super-resolution model as pretrained model to train a 4x super-resolution model. This step is repeated twice." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.457, + 0.483, + 0.561 + ], + "angle": 0, + "content": "- Step2. HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. MSE loss, frequency-aware loss and self distillation loss with AdamW optimizer are used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 500k. This step is also repeated twice." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.563, + 0.483, + 0.668 + ], + "angle": 0, + "content": "- Step3. They only train the teacher model. 
HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. MSE loss and frequency-aware loss with AdamW optimizer are used and the initial learning rate is set to 0.00005 and halved at every 100k iterations. The total iterations is 500k. This step is also repeated twice." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.306, + 0.483, + 0.668 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.672, + 0.483, + 0.702 + ], + "angle": 0, + "content": "II. At the second stage, they use progressive learning to get the final student model." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.705, + 0.483, + 0.794 + ], + "angle": 0, + "content": "- Step4. They drop the additional SPAB block one by one. HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. L1 loss with AdamW optimizer are used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 500k." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.796, + 0.483, + 0.9 + ], + "angle": 0, + "content": "- Step5. They repeat the following training process many times until convergence. HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. MSE loss and frequency-aware loss with AdamW optimizer are used and the initial learning rate is set to 0.00005 and halved at every 100k iterations. The total iterations is 500k." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.705, + 0.483, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.091, + 0.622, + 0.106 + ], + "angle": 0, + "content": "4.6. VPEG_C" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.113, + 0.905, + 0.203 + ], + "angle": 0, + "content": "General Method Description. As illustrated in Fig. 6, they propose a Dual Attention Network (DAN) for the lightweight single-image super-resolution task. 
The core components of DAN consist of three parts: a Local Residual Block (LRB), a Spatial Attention Block (SAB), and a Channel Attention Block (CAB)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.204, + 0.905, + 0.309 + ], + "angle": 0, + "content": "Local Residual Block (LRB). They leverage the \\(1 \\times 1\\) convolution layers followed by a \\(3 \\times 3\\) depthwise convolution as the basic unit, repeated three times. Specially, GELU activation is applied on each layers, and the features are passed in a densely connected manner. At the end of the block, feature maps from different levels are aggregated using channel concatenation, effectively capturing local image details." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.31, + 0.905, + 0.384 + ], + "angle": 0, + "content": "Spatial Attention Block (SAB). They adopt the spatial attention design of SMFANet [144], which employs a variance-constrained feature modulation mechanism to aggregate spatial feature. This allows efficient spatial interaction with minimal computational cost." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.385, + 0.905, + 0.474 + ], + "angle": 0, + "content": "Channel Attention Block (CAB). Global channel-wise information is modeled through a self-gating mechanism that enhances local representations and increases model non-linearity. This is followed by a key-value shared MDTA [132] for global interaction and a GDFN [132] for feature refinement." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.476, + 0.905, + 0.521 + ], + "angle": 0, + "content": "Training Description. The proposed DAN consists of 6 feature mixing modules with 16 channels. The training process is divided into two stages:" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.523, + 0.905, + 0.643 + ], + "angle": 0, + "content": "1. Pre-training Stage: They pre-train DAN using 800 images from the DIV2K [100] and the first 10K images of the LSDIR [64] datasets. 
The cropped LR image size is \\(72 \\times 72\\), and the mini-batch size is set to 64. The DAN is trained by minimizing L1 loss and the frequency loss[14] with Adam optimizer for total 800, 000 iterations. The initial learning rate is set to 2e-3 and halved at 200K, 400K, 600K, 700K." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.644, + 0.905, + 0.763 + ], + "angle": 0, + "content": "2. Fine-tuning Stage: They fine-tune the model on the 800 images of DIV2K [100] and the first 10K images of the LSDIR [64] datasets. The cropped LR image size is \\(72 \\times 72\\), and the mini-batch size is set to 64. The DAN is trained by minimizing PSNR loss with the Adam optimizer for total 200, 000 iterations. They set the initial learning rate to 5e-4 and halve it at 50K, 100K, 150K, and 175 K." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.523, + 0.905, + 0.763 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.773, + 0.637, + 0.789 + ], + "angle": 0, + "content": "4.7. XUPTBoys" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.795, + 0.905, + 0.855 + ], + "angle": 0, + "content": "General Method Description. The XUPTBoys team proposed the Frequency-Guided Multilevel Dispersion Network (FMDN), as shown in Fig. 7.FMDN adopts a similar basic framework to [45, 67, 71, 81]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.856, + 0.905, + 0.901 + ], + "angle": 0, + "content": "Based on the above analysis, they propose the new Frequency-Guided Multi-level Dispersion Block(FMDB) and the new Frequency-Guided Multi-level Dispersion" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.156, + 0.107, + 0.825, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.343, + 0.419, + 0.655, + 0.433 + ], + "angle": 0, + "content": "Figure 6. Team VPEG_C: An overview of the DAN." 
+ }, + { + "type": "image", + "bbox": [ + 0.13, + 0.449, + 0.818, + 0.57 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.157, + 0.58, + 0.839, + 0.596 + ], + "angle": 0, + "content": "Figure 7. Team XUPTBoys: The whole framework of Frequency-Guided Multi-level Dispersion Network (FMDN)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.621, + 0.484, + 0.788 + ], + "angle": 0, + "content": "Block Basic(FMDB-B) as the base block of FMDN. As shown in Fig. 8 they use Hierarchical Variance-guided Spatial Attention(HVSA), Reallocated Contrast-Aware Channel Attention (RCCA) as alternatives to Enhanced Spatial Attention (ESA) [73] and Contrast-Aware Channel Attention (CCA) [44], Frequency-Guided Residual block (FRB), Asymmetric FeedForward Network (AFFN), Multilevel Residual Convolution (MRConv) and Multilevel Residual Convolution Basic(MRConv-B). The difference between FMDB and FMDB-B is that the former uses MRConv, while the latter uses MRConv-B." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.795, + 0.484, + 0.903 + ], + "angle": 0, + "content": "In HVSA, the effects of multilevel branching and local variance on performance are examined. Small-window multilevel branches fail to capture sufficient information, while local variance within a single branch can create significant weight disparities. To address these issues, [81] was enhanced to introduce the D5 and D7 branches, which effectively utilize local variance to capture information" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.621, + 0.908, + 0.894 + ], + "angle": 0, + "content": "rich regions while balancing performance and complexity. In RCCA, this approach improves the traditional channel attention mechanism by not only reallocating weights across channels but also better managing shared information among them. 
Introduces complementary branches with \\(1 \\times 1\\) convolutions and GELU activation functions, which help redistribute complementary information, improving the uniqueness of each channel. In FRB, it enhances feature representation using convolutional layers and GELU activation. It normalizes input, extracts features with depthwise convolutions of different kernel sizes, and combines them through residual connections to preserve spatial information for effective image processing. In AFFN, it applies layer normalization and a \\(1 \\times 1\\) convolution to expand feature dimensions. It then uses two depthwise convolutions with different kernel sizes, combines the results with GELU activation, and projects the output back to the original dimension with a residual connection. In MRConv and" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.104, + 0.09, + 0.556, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.091, + 0.842, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.664, + 0.179, + 0.731, + 0.189 + ], + "angle": 0, + "content": "(f) MRCov-B" + }, + { + "type": "image", + "bbox": [ + 0.127, + 0.178, + 0.37, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.237, + 0.446, + 0.284, + 0.457 + ], + "angle": 0, + "content": "(b) HVSA" + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.198, + 0.517, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.432, + 0.444, + 0.469, + 0.455 + ], + "angle": 0, + "content": "(c) FRB" + }, + { + "type": "image", + "bbox": [ + 0.539, + 0.218, + 0.65, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.56, + 0.444, + 0.604, + 0.455 + ], + "angle": 0, + "content": "(d) AFFN" + }, + 
{ + "type": "image", + "bbox": [ + 0.668, + 0.226, + 0.811, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.693, + 0.442, + 0.74, + 0.452 + ], + "angle": 0, + "content": "(e) RCCA" + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.476, + 0.907, + 0.531 + ], + "angle": 0, + "content": "Figure 8. Team XUPTBoys: The details of each component. (a) FMDB: Frequency-Guided Multi-level Dispersion Block; (b) HVSA: Hierarchical Variance-guided Spatial Attention; (c) FRB: Frequency-Guided Residual Block; (d) AFFN: Asymmetric FeedForward Network; (e) RCCA: Reallocated Contrast-aware Channel Attention; (f) MRConv-B/MRConv: Multilevel Residual Convolution Basic and Multilevel Residual Convolution" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.559, + 0.483, + 0.62 + ], + "angle": 0, + "content": "MRConv-B, MRConv and MRConv-B use convolution kernels of different sizes for parallel convolution, and finally activate the output using GELU and combine it with residual connections, effectively preserving spatial information." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.624, + 0.484, + 0.685 + ], + "angle": 0, + "content": "Training Description. The proposed FMDN has 3 FMDB-Basic blocks and 1 FMDB block, in which the number of feature channels is set to 24. The details of the training steps are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.689, + 0.483, + 0.809 + ], + "angle": 0, + "content": "1. Pretraining on the DIV2K [102] and and Flickr2K [70]. HR patches of size \\( 256 \\times 256 \\) are randomly cropped from HR images, and the mini-batch size is set to 64. The model is trained by minimizing the L1 loss function [77] with the Adam optimizer [53]. The initial learning rate is set to \\( 2 \\times 10^{-3} \\) and halved at \\( \\{100k, 500k, 800k, 900k, 950k\\} \\)-iteration. The total number of iterations is \\( 1000k \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.091, + 0.811, + 0.484, + 0.902 + ], + "angle": 0, + "content": "2. Finetuning on 800 images of DIV2K and the first 10k images of LSDIR [64]. HR patch size and mini-batch size are set to \\(384 \\times 384\\) and 64, respectively. The model is fine-tuned by minimizing L2 loss function [77]. The initial learning rate is set to \\(5 \\times 10^{-4}\\) and halved at \\(\\{500k\\}\\)-iteration. The total number of iterations is" + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.689, + 0.484, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.559, + 0.585, + 0.572 + ], + "angle": 0, + "content": "1000k." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.588, + 0.636, + 0.603 + ], + "angle": 0, + "content": "4.8. HannahSR" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.612, + 0.907, + 0.793 + ], + "angle": 0, + "content": "General Method Description. The architecture of the proposed network is depicted in Fig. 9, which is inspired by previous studies such as AGDN [114], MDRN [80] and SPAN [109]. They propose a Multi-level Refinement and Bias-learnable Attention dual branch Network (MRBAN). More specifically, they build upon the AGDN framework by constructing another branch consisting of one \\(3 \\times 3\\) convolution layer (ISRB) and one \\(1 \\times 1\\) convolution layer to enhance the overall performance in a learnable way. Meanwhile, they replace the concat module in the AGDN with a direct element-wise summation, for the sake of harvesting significant savings of the parameters." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.796, + 0.909, + 0.903 + ], + "angle": 0, + "content": "In addition, they propose the multi-level refinement and bias-learnable attention block (MRBAB) as the basic block of our network. As described in Figure 10, they attempt to minimize the information loss induced by Sigmoid module. 
When confronted with a negative input with a large absolute value, the output of the Sigmoid module will be approximately equal to zero, which results in remarkable" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.099, + 0.092, + 0.9, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.212, + 0.907, + 0.24 + ], + "angle": 0, + "content": "Figure 9. Team HannahSR: The overall architecture of Multi-level Refinement and Bias-learnable Attention Dual Branch Network (MR-BAN)." + }, + { + "type": "image", + "bbox": [ + 0.161, + 0.259, + 0.838, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.371, + 0.411, + 0.626, + 0.424 + ], + "angle": 0, + "content": "(a) Team HannahSR: The MRBAB architecture." + }, + { + "type": "image", + "bbox": [ + 0.149, + 0.436, + 0.856, + 0.721 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.304, + 0.727, + 0.551, + 0.739 + ], + "angle": 0, + "content": "(b) Team HannahSR: The MRBA architecture." + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.757, + 0.907, + 0.814 + ], + "angle": 0, + "content": "Figure 10. Team HannahSR: The detailed architecture of the network MRBAN. (a) MRBAB: Multi-level Refinement and Bias-learnable Attention Block; (b) MRBA: Multi-level Refinement and Bias-learnable Attention; Other components: BSRB: Blueprint Shallow Residual Block [66]; BSConv: Blueprint Separable Convolution [66]; RCCA: Reallocated Contrast-aware Channel Attention [114]; SGSA: Sparse Global Self-attention [114]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.486, + 0.9 + ], + "angle": 0, + "content": "information loss. To address this issue, SPAN [109] used an origin-symmetric activation function. 
They added a bias of \\(-0.5\\) to the Sigmoid function, which allowed the information carried by negative inputs to be taken into account." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.909, + 0.901 + ], + "angle": 0, + "content": "However, when dealing with the larger positive inputs, their outputs would be approximately equal to 0.5. When compared with the original 1.0, they inevitably suffered from significant information loss. To tackle this issue, they set the" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.483, + 0.137 + ], + "angle": 0, + "content": "negative bias as a learnable parameter so that it can be updated dynamically during the training process to optimally boost the accuracy performance." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.138, + 0.483, + 0.258 + ], + "angle": 0, + "content": "Eventually, they adopt the reparameterization technique. They replace the first \\(3 \\times 3\\) convolution layer with identical scale reparameterization block to extract richer local features for supplying the following layers with more valuable information, while standardizing the number of channels to an identical scale for lightweight super resolution networks to prevent incurring inappropriate model capacity increments." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.26, + 0.484, + 0.319 + ], + "angle": 0, + "content": "Training Strategy. The proposed MRBAN consists of 4 MRBAB, and the feature channel is set to 32. They adopt a four-step training strategy. The details of the training steps are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.321, + 0.483, + 0.426 + ], + "angle": 0, + "content": "1. Pretraining on the DIV2K [2] and Flickr2K [69] datasets with the patch size of \\(256 \\times 256\\) and the mini-batch size is set to 64. 
The MRBAN is trained by minimizing the L1 loss function with the Adam optimizer. The initial learning rate is set to \\(3 \\times 10^{-3}\\) and halved at \\(\\{100\\mathrm{k}, 500\\mathrm{k}, 800\\mathrm{k}, 900\\mathrm{k}, 950\\mathrm{k}\\}\\)-iteration. The number of iterations is \\(1000\\mathrm{k}\\)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.427, + 0.483, + 0.517 + ], + "angle": 0, + "content": "2. Initial fine-tuning on DIV2K and the first 10K images of LSDIR [64]. The patch size is \\(384 \\times 384\\) and the minibatch size is set to 32. The model is trained by minimizing the MSE loss function. The initial learning rate is set to \\(1.5 \\times 10^{-3}\\) and halved at \\(\\{100\\mathrm{k}, 500\\mathrm{k}, 800\\mathrm{k}, 900\\mathrm{k}, 950\\mathrm{k}\\}\\)-iteration. The number of iterations is \\(1000\\mathrm{k}\\)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.518, + 0.483, + 0.623 + ], + "angle": 0, + "content": "3. Advanced training on the DIV2K and the whole LSDIR datasets. The patch size is \\( 384 \\times 384 \\) and the mini-batch size is set to 64. The model is trained by minimizing the MSE loss function. The initial learning rate is set to \\( 8 \\times 10^{-4} \\) and halved at \\( \\{100\\mathrm{k}, 500\\mathrm{k}, 800\\mathrm{k}, 900\\mathrm{k}, 950\\mathrm{k}\\} \\)-iteration. The number of iterations is \\( 1000\\mathrm{k} \\). This stage can be repeated twice." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.624, + 0.483, + 0.713 + ], + "angle": 0, + "content": "4. Final fine-tuning on the DIV2K and the whole LSDIR datasets. The patch size is \\( 448 \\times 448 \\) and the mini-batch size is set to 128. The model is trained by minimizing the MSE loss function. The initial learning rate is set to \\( 5 \\times 10^{-6} \\) and halved at \\( \\{100\\mathrm{k}, 500\\mathrm{k}, 800\\mathrm{k}, 900\\mathrm{k}, 950\\mathrm{k}\\} \\)-iteration. The number of iterations is \\( 1000\\mathrm{k} \\)." 
+ }, + { + "type": "list", + "bbox": [ + 0.085, + 0.321, + 0.483, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.727, + 0.188, + 0.741 + ], + "angle": 0, + "content": "4.9. Davinci" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.75, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Final Solution Description. They chose the Swift Parameter-free Attention Network [112] as their base model, the winner of the NTIRE2024 ESR track. After trying the evolution pipeline mentioned in SwinFIR [133], the content decoupling strategy proposed in CoDe [31], the pre-training fine-tuning paradigm, and the model compression techniques such as model pruning and knowledge distillation discussed in Ref [51] respectively, they employ the model Pruning of the last layer with \\( l_{2} \\) norm of the baseline and introducing the mixup Augmentation as their final" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.088, + 0.909, + 0.59 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.597, + 0.907, + 0.655 + ], + "angle": 0, + "content": "Figure 11. Team Rochester: They reduce the channel dimension from 48 to 28 from the original design and introduce additional convolution to stabilize the attention feature maps from SPAB blocks. Example input and output are adapted from [99]." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.665, + 0.907, + 0.695 + ], + "angle": 0, + "content": "proposal to preserve the original parameter distributions as much as possible, termed PlayerAug." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.697, + 0.909, + 0.803 + ], + "angle": 0, + "content": "Training Details. After pruning the SPAN, they train it on the DIV2K_LSDIR mixed training set, cropping the patch size to 512. The random rotation and flip are configured for data augmentation. 
The Adam [54] optimizer with \\(\\beta_{1} = 0.9\\) and \\(\\beta_{2} = 0.99\\) and the L1 loss function are adopted to optimize the models, and the mini-batch size is set to 32. All the experiments are conducted on 8 L40S GPUs." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.817, + 0.638, + 0.832 + ], + "angle": 0, + "content": "4.10. Rochester" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.908, + 0.903 + ], + "angle": 0, + "content": "Method Details. The proposed method, ESRNet, is an improved and more efficient variant of last year's XiaomiMM SPAN network [112]. The original SPAN network demonstrated strong generation quality but required" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.51, + 0.937 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.168 + ], + "angle": 0, + "content": "complex training tricks and model fusion strategies, making it difficult to reproduce and computationally expensive. In contrast, ESRNet achieves similar performance with significantly reduced computational overhead, enhanced training stability, and improved inference speed." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.195, + 0.482, + 0.255 + ], + "angle": 0, + "content": "Model Architecture. A key aspect of ESRNet's design is its ability to maintain high performance while reducing computational costs. As shown in Fig. 11, their modifications include:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.26, + 0.483, + 0.351 + ], + "angle": 0, + "content": "- Retaining the first six SPAN attention blocks as core feature extraction components while introducing a lightweight convolutional layer to refine the extracted feature maps before fusing them with the original features. This modification enhances feature representation while stabilizing the training process." 
+ }, + { + "type": "text", + "bbox": [ + 0.092, + 0.351, + 0.483, + 0.44 + ], + "angle": 0, + "content": "- Reducing the number of feature channels from 48 to 26, leading to a substantial decrease in both model parameters and floating-point operations (FLOPs). This reduction not only lowers GPU memory consumption but also improves inference efficiency without degrading performance." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.442, + 0.483, + 0.501 + ], + "angle": 0, + "content": "- Improved validation speed, as ESRNet requires fewer computations per forward pass, making it more suitable for real-time applications compared with the baseline method." + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.26, + 0.483, + 0.501 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.507, + 0.483, + 0.584 + ], + "angle": 0, + "content": "Overall, ESRNet has approximately half the number of parameters and FLOPs compared to the baseline EFPN network, yet it maintains a high PSNR score, demonstrating that their modifications achieve an excellent trade-off between efficiency and performance." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.609, + 0.483, + 0.701 + ], + "angle": 0, + "content": "Training Methodology. They train ESRNet on RGB image patches of size \\(256 \\times 256\\), applying standard augmentation techniques such as random flipping and rotation to enhance generalization. To ensure stable convergence and optimal performance, they adopt a three-stage training strategy:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.705, + 0.483, + 0.78 + ], + "angle": 0, + "content": "1. Initial Feature Learning: They train the model with a batch size of 64 using Charbonnier loss, a robust loss function that mitigates the effects of outliers. The Adam optimizer is used with an initial learning rate of \\(2 \\times 10^{-4}\\), which follows a cosine decay schedule." 
+ }, + { + "type": "text", + "bbox": [ + 0.092, + 0.781, + 0.483, + 0.84 + ], + "angle": 0, + "content": "2. Refinement Stage: They progressively decrease the learning rate linearly from \\( 2 \\times 10^{-4} \\) to \\( 2 \\times 10^{-5} \\), allowing the model to refine its learned features while maintaining stable gradients." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.84, + 0.483, + 0.901 + ], + "angle": 0, + "content": "3. Fine-Tuning with L2 Loss: In the final stage, they adopt L2 loss to fine-tune the model, further enhancing detail restoration. The learning rate is further reduced from \\( 2 \\times 10^{-5} \\) to \\( 1 \\times 10^{-6} \\) for smooth convergence." + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.705, + 0.483, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.905, + 0.151 + ], + "angle": 0, + "content": "By structuring the training into these stages, they eliminate the need for complex training tricks used in previous approaches while achieving more stable and reliable optimization." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.153, + 0.905, + 0.29 + ], + "angle": 0, + "content": "One of the most significant advantages of ESRNet is its improved validation time due to its optimized architecture. Compared to the original SPAN network, ESRNet achieves a similar PSNR score while reducing computational complexity. The model requires significantly fewer FLOPs and parameters, leading to a noticeable reduction in inference time and GPU memory usage. This makes ESRNet a practical solution for applications requiring both high-quality generation and efficient computation." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.301, + 0.602, + 0.315 + ], + "angle": 0, + "content": "4.11.IESR" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.324, + 0.905, + 0.459 + ], + "angle": 0, + "content": "Model Design. 
As for the Efficint Super-Resolution competition, they proposed the Inference Efficient Super-Resolution Net (IESRNet). IESRNet is not a specific network, but a bag of tricks to make a Super-Resolution Network infer more Efficient on a GPU. They will apply these tricks based on DIPNet [128], which won the first place on the NTIRE2023 ESR challenge in runtime track [65]. The specific structure of IESRNet is shown in Fig. 12. They will describe the tricks they used in detail below." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.461, + 0.905, + 0.55 + ], + "angle": 0, + "content": "1. Remove bias in Conv. The bias add of the convolution is a relatively inefficient operation in the convolution layer. It only occupies a small part of the FLOPs in the convolution, but occupies \\(15\\%\\) or more of the runtime. They removed the bias of all convolutional layers except the ESA module, and the PSNR loss was less than 0.01db." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.552, + 0.905, + 0.657 + ], + "angle": 0, + "content": "2. Less Residual Connection. Although residual connection helps the model converge during training, too many residual structures will introduce many additional operations, reducing the inference efficiency of the model. Therefore, they replace the two middle RRFB in DIPNet with reparameterization no residual block(RNRB) to balance the trade-off between inference efficiency and model accuracy." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.659, + 0.905, + 0.809 + ], + "angle": 0, + "content": "3. Standard number of Conv channels. Since the convolution operator has different performance optimizations for different configurations, generally, convolutions with a standard number of channels (such as 32, 48, and 64) are more deeply optimized and therefore occupy higher inference efficiency on the GPU. 
Based on NVIDIA V100 GPU testing, a 48-channel \\(3^{*}3\\) convolution is even faster than a 30-channel convolution, although the FLOPs is over doubled. For this reason, they set the number of feature channels to 32, and the number of ESA channels to 16." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.811, + 0.905, + 0.9 + ], + "angle": 0, + "content": "4. Efficient activation function. They replace all activation functions in the network with SiLU [27], which performs well in super-resolution tasks and significantly outperforms the RELU. In addition to its great performance, SiLU is also very fast when inferring on GPUs due to its computational characteristics." + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.461, + 0.905, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.091, + 0.086, + 0.912, + 0.462 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.471, + 0.825, + 0.487 + ], + "angle": 0, + "content": "Figure 12. Team IRSR: The overview of the proposed IESRNet. The IESRNet is built based on DIPNet [128]." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.513, + 0.482, + 0.619 + ], + "angle": 0, + "content": "5. Reparameterization. They adopt re-parameterization to enhance the representation capabilities of the model. They use complex re-parameterization structures to train during training and merge them into regular convolutions during inference without incurring additional computational overhead. The specific rep-structure is shown in Fig. 12(c)." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.621, + 0.483, + 0.711 + ], + "angle": 0, + "content": "Implementation Details. The training dataset consists of DIV2K and the first 15,000 images of LSIDR [64]. Random flipping and rotation are adopt for Data Augmentation. 
They adopt a multi-stage training paradigm to train their super-resolution network. The details of training steps are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.715, + 0.483, + 0.805 + ], + "angle": 0, + "content": "1. Initial training: HR patches of size \\(256 \\times 256\\) are randomly cropped from HR images. They set the mini-batch as 128. The model is trained by minimizing the PSNR loss with the Adam optimizer. The initial learning rate is set to 5e-4, and halved per 200k iterations. The total number of iterations is 1000k." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.808, + 0.483, + 0.839 + ], + "angle": 0, + "content": "2. Warm-Start Training: Load the pre-trained weight and train it three times with the same setting." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.841, + 0.483, + 0.901 + ], + "angle": 0, + "content": "3. Finetune with increasing patch size: In this process, the training patch size is progressively increased to improve the performance, which is selected from [384, 512, 640]. For each patch size, they finetune the network with \\(1000\\mathrm{k}\\)" + }, + { + "type": "list", + "bbox": [ + 0.09, + 0.715, + 0.483, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.513, + 0.907, + 0.573 + ], + "angle": 0, + "content": "iterations. And the initial learning rate is correspondingly selected from [2e-4, 1e-4, 5e-5]. The batch size decreases to 64 for saving GPU memory. All experiments are conducted on 8 NVIDIA V100 GPUs." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.588, + 0.597, + 0.603 + ], + "angle": 0, + "content": "4.12. ASR" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.612, + 0.906, + 0.688 + ], + "angle": 0, + "content": "Model Design. The network architecture is built based on DIPNet [128], which won the first place on the NTIRE2023 ESR challenge runtime track [65]. 
They made several modifications to make it more efficient while maintaining the excellent performance. They call it DIPNetSlim." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.69, + 0.907, + 0.901 + ], + "angle": 0, + "content": "First of all, they did not use pruning as DIPNet does. Although it can decrease the model parameters, it will degrade the inference speed of the model due to the irregular number of convolution channels. These operator configurations are not deeply optimized. For this reason, they set the number of feature channels to 32, and the number of ESA channels to 16. Second, they re-parameterize all 3x3 convolutional layers in the network. They adopt re-parameterization to enhance the expressiveness of the model. They use complex re-parameterization structures to train during training and merge them into regular convolutions during inference without incurring additional infer overhead. In addition, they changed the last convolution before the residual connection from 3x3 to 1x1, saving parameters while retain-" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.152 + ], + "angle": 0, + "content": "ing the ability of feature normalization. Finally, they replace all activation functions in the network with SiLU [27], which performs well in super-resolution tasks and significantly outperforms the RELU." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.153, + 0.483, + 0.198 + ], + "angle": 0, + "content": "Implementation Details. The training dataset consists of DIV2K [103] and the first 15,000 images of LSIDR. The details of training steps are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.2, + 0.482, + 0.289 + ], + "angle": 0, + "content": "1. Initial Training: HR patches of size \\(256 \\times 256\\) are randomly cropped from HR images. They set the mini-batch as 128. 
The model is trained by minimizing the PSNR loss with the Adam optimizer. The initial learning rate is set to 5e-4, and halved per 200k iterations. The total number of iterations is 1000k." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.291, + 0.482, + 0.32 + ], + "angle": 0, + "content": "2. Warm-Start Training: Load the pre-trained weight and train it three times with the same setting." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.322, + 0.482, + 0.427 + ], + "angle": 0, + "content": "3. Finetune with increasing patch size: In this process, the training patch size is progressively increased to improve the performance, which is selected from [384, 512, 640]. For each patch size, they finetune the network with \\(1000k\\) iterations. And the initial learning rate is correspondingly selected from [2e-4, 1e-4, 5e-4]. The batch size decreases to 64 for saving GPU memory." + }, + { + "type": "list", + "bbox": [ + 0.09, + 0.2, + 0.482, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.439, + 0.208, + 0.453 + ], + "angle": 0, + "content": "4.13. VPEG_O" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.462, + 0.483, + 0.656 + ], + "angle": 0, + "content": "General Method Description. They introduce SAFMnV3, an enhanced version of SAFMN [96] for solving real-time image SR. This solution is mainly concentrates on improving the effectiveness of the spatially-adaptive feature modulation (SAFM) [96] layer. Different from the original SAFMN, as shown in Fig 13, the simplified SAFM layer is able to extract both local and non-local features simultaneously without channel splitting. Within this module, they use two \\(3 \\times 3\\) convolutions to project the input and use variance-constrained feature modulation operator [144] in branches with fewer channels, and finally aggregate these two parts of the feature, then refine the aggregated features via a feed-forward neural network." 
+ }, + { + "type": "text", + "bbox": [ + 0.089, + 0.659, + 0.483, + 0.749 + ], + "angle": 0, + "content": "Training Description. The proposed SAFMNv3 consists of 6 feature mixing modules, and the number of channels is set to 40. They train the network on RGB channels and augment the training data with random flipping and rotation. Following previous methods, the training process is divided into three stages:" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.75, + 0.483, + 0.856 + ], + "angle": 0, + "content": "1. In the first stage, they randomly crop \\(256 \\times 256\\) HR image patches from the selected LSIDR [64] dataset, with a batch size of 64. The proposed SAFMNv3 is trained by minimizing L1 loss and the frequency loss[14] with Adam optimizer for total 800, 000 iterations. The initial learning rate is set to 2e-3, with a Cosine Annealing scheme [78]." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.856, + 0.483, + 0.901 + ], + "angle": 0, + "content": "2. In the second stage, they increase the size of the HR image patches to \\(384 \\times 384\\). The model is fine-tuned on the DF2K [100] by minimizing Charbonnier loss function." + }, + { + "type": "list", + "bbox": [ + 0.085, + 0.75, + 0.483, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.092, + 0.905, + 0.121 + ], + "angle": 0, + "content": "The initial learning rate is set to 5e-4, and the total iterations is \\(500\\mathrm{k}\\)" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.122, + 0.905, + 0.168 + ], + "angle": 0, + "content": "3. In the third stage, the batch size is set to 64, and PSNR loss is adopted to optimize over \\(300\\mathrm{k}\\) iterations. The initial learning rate is set to 5e-5." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.171, + 0.905, + 0.217 + ], + "angle": 0, + "content": "Throughout the training process, they also employ an Exponential Moving Average (EMA) strategy to enhance the robustness of training." 
+ }, + { + "type": "title", + "bbox": [ + 0.513, + 0.237, + 0.613, + 0.251 + ], + "angle": 0, + "content": "4.14.mmSR" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.263, + 0.905, + 0.67 + ], + "angle": 0, + "content": "Method. They improve the model based on SAFMN++ [91] and name it FAnet as shown in Fig. 14. Compared to SAFMN++, their model achieves a higher PSNR with a lower computational cost. Unlike the original SAFMN++ method, they introduce modifications in both the data and model structure. In terms of model structure, as shown in the figure, they improve the Feature Mixing Module of the original architecture and incorporate the concept of reparameterization, designing the RFMM. They modify the convolutional extraction network preceding the original module into a parallel structure to accommodate multi-granularity feature extraction and apply re-parameterization [23] during inference. Furthermore, they adjust the downsampling factor in SimpleSAFM to 16 to achieve lower computational complexity. Regarding the data, in addition to utilizing the provided training dataset, they analyze the superresolution results of the model and identify common issues in fine-detail generation. Given constraints on model parameters and computational resources, it is impractical for a lightweight model to generate details identical to the ground truth. Therefore, they shift their focus to expanding the training dataset. Specifically, they use 10,800 images from the training dataset as input and employ convolutional neural networks such as Omni-SR [113] to generate new images. This additional data is incorporated into the training process to facilitate learning and mitigate the risk of learning bias caused by excessive learning difficulty." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.675, + 0.907, + 0.9 + ], + "angle": 0, + "content": "Training Details. They train their model on the DIV2K [100], Flickr2K [70], and LSDIR [64] datasets. 
The cropped low-resolution (LR) image size is set to 64 × 64 and subjected to random flipping and rotation. The FAnet model is optimized using the Adam optimizer with L1 loss minimization in a multi-stage training scheme. During the training phase, they set the initial learning rate to \\(2 \\times 10^{-3}\\) and the minimum learning rate to \\(1 \\times 10^{-6}\\), training for 500,000 iterations with a mini-batch size of 512. In finetuning stage, Initialized with training phase weights, they fine-tune the model with the given training dataset and additional dataset which is proposed as above. They finetune the model using a learning rate of \\(1 \\times 10^{-4}\\) and the minimum learning rate set to \\(1 \\times 10^{-6}\\), with a mini-batch size of 64." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.1, + 0.093, + 0.9, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.293, + 0.238, + 0.704, + 0.253 + ], + "angle": 0, + "content": "Figure 13. Team VPEG_O: An overview of the proposed SAFMNv3." + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.274, + 0.482, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.462, + 0.484, + 0.49 + ], + "angle": 0, + "content": "Figure 14. Team mmSR: The overall network architecture of FAnet." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.518, + 0.203, + 0.533 + ], + "angle": 0, + "content": "4.15. ChanSR" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.541, + 0.484, + 0.662 + ], + "angle": 0, + "content": "General Method Description. They propose the Edge Enhanced Convolutional Network (EECNet) for the efficient super-resolution task. The network architecture is inspired by the design of SRN [118], while fully exploring the capacity of reparameterizable convolution. 
The whole architecture is shown in Fig. 15(a). They introduce a predefined High-Pass Filter (HPF) branch to explicitly capture edge details, formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.676, + 0.483, + 0.726 + ], + "angle": 0, + "content": "\\[\n\\mathbf {K} _ {h p f} = \\frac {1}{1 6} \\left[ \\begin{array}{r r r} - 1 & - 2 & - 1 \\\\ - 2 & 1 2 & - 2 \\\\ - 1 & - 2 & - 1 \\end{array} \\right]. \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.734, + 0.483, + 0.869 + ], + "angle": 0, + "content": "Then they integrate the proposed HPF into the EDBB [116], creating the subEEC module. As subEEC can be mathematically equivalent to a standard \\(3 \\times 3\\) convolution, they replace the original \\(3 \\times 3\\) convolution in RRRB [25] with our subEEC to obtain the final EEC architecture, whose structure is shown in Fig. 15(b). Notably, to ensure valid re-parameterization, they initialize the bias of the first convolution layer as zero to compensate for the zeropadding operation in subEEC." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.871, + 0.484, + 0.901 + ], + "angle": 0, + "content": "To better capture global spatial information, they adopt the simplified Efficient Spatial Attention mechanism from" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.279, + 0.905, + 0.354 + ], + "angle": 0, + "content": "SRN [118], whose structure is shown in Fig. 15(c). Compared with the original ESA, this implementation removes the \\(1 \\times 1\\) convolution layer and reduces computational complexity by employing only a single \\(3 \\times 3\\) convolution in the convolutional group." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.355, + 0.907, + 0.534 + ], + "angle": 0, + "content": "Training Description. The proposed EECNet contains eight EEBs, in which they set the number of feature maps to 32. Also, the channel number of the ESA is set to 16 similar to [56]. 
Throughout the entire training process, they use the Adam optimizer [54], where \\(\\beta 1 = 0.9\\) and \\(\\beta 2 = 0.999\\). The model is trained for \\(1000k\\) iterations in each stage. Input patches are randomly cropped and augmented. Data augmentation strategies included horizontal and vertical flips, and random rotations of 90, 180, and 270 degrees. Model training was performed using Pytorch 1.12.0 [85] on RTX 3090. Specifically, the training strategy consists of several steps as follows." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.536, + 0.906, + 0.656 + ], + "angle": 0, + "content": "1. In the starting stage, they train the model from scratch on the 800 images of DIV2K [4] and the first 10k images of LSDIR [64] datasets. The model is trained for a total \\(10^{6}\\) iterations by minimizing L1 loss and FFT loss [15]. The HR patch size is set to \\(256 \\times 256\\), while the mini-batch size is set to 64. They set the initial learning rate to \\(1 \\times 10^{-3}\\) and the minimum one to \\(1 \\times 10^{-5}\\), which is updated by the Cosine Annealing scheme." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.657, + 0.905, + 0.747 + ], + "angle": 0, + "content": "2. In the second stage, they increase the HR patch size to 384, while the mini-batch size is set to 32. The model is fine-tuned by minimizing the L1 loss and the FFT loss. They set the initial learning rate to \\(5 \\times 10^{-4}\\) and the minimum one to \\(1 \\times 10^{-6}\\), which is updated by the Cosine Annealing scheme." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.748, + 0.905, + 0.809 + ], + "angle": 0, + "content": "3. In the last stage, the model is fine-tuned with \\(480 \\times 480\\) HR patches, however, the loss function is changed to minimize the combination of L2 loss and FFT loss [15]. Other settings are the same as Stage 2." 
+ }, + { + "type": "list", + "bbox": [ + 0.512, + 0.536, + 0.906, + 0.809 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.819, + 0.688, + 0.833 + ], + "angle": 0, + "content": "4.16. Pixel Alchemists" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.906, + 0.902 + ], + "angle": 0, + "content": "Network Architecture. The overall architecture of team Pixel Alchemists is shown in Fig. 16. They propose a novel architecture named resolution-consistent UNet (RCUNet). The proposed network consists of four deep feature comple" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.113, + 0.088, + 0.885, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.114, + 0.203, + 0.885, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.308, + 0.398, + 0.687, + 0.412 + ], + "angle": 0, + "content": "Figure 15. Team ChanSR: Network architecture of the EECNet." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.439, + 0.483, + 0.56 + ], + "angle": 0, + "content": "ment and distillation blocks (DFCDB). Inspired by [35, 83], the input feature map is split along the channel dimension in each block. Then, four convolutional layers process one of the split feature maps to generate complementary features. The input features and complementary features are concatenated to avoid loss of input information and distilled by a conv-1 layer. Besides, the output feature map of DFCDB is further enhanced by the ESA layer [55]." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.562, + 0.483, + 0.728 + ], + "angle": 0, + "content": "Online Convolutional Re-parameterization. Reparameterization [136] has improved the performance of image restoration models without introducing any inference cost. 
However, the training cost is large because of complicated training-time blocks. To reduce the large extra training cost, online convolutional re-parameterization [41] is employed by converting the complex blocks into a single convolutional layer during the training stage. The architecture of RepConv is shown in Fig. 17. It can be converted to a \\(3 \\times 3\\) convolution during training, which saves the training cost." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.729, + 0.483, + 0.772 + ], + "angle": 0, + "content": "Training Details. The proposed RCUNet has four DFCDBs. The number of features is set to 48, and the number of ESA channels is set to 16." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.775, + 0.483, + 0.806 + ], + "angle": 0, + "content": "DIV2K [4] and LSDIR [64] datasets are used for training. The training details are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.81, + 0.483, + 0.901 + ], + "angle": 0, + "content": "1. The model is first trained from scratch with \\(256 \\times 256\\) patches randomly cropped from HR images from the DIV2K and LSDIR datasets. The mini-batch size is set to 64. The L1 loss and pyramid loss are minimized with the Adam optimizer. The initial learning rate is set to 1e-3 with a cosine annealing schedule. The total number of" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.439, + 0.657, + 0.453 + ], + "angle": 0, + "content": "iterations is 1000k." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.453, + 0.905, + 0.514 + ], + "angle": 0, + "content": "2 Then the model is initialized with the pre-trained weights of Stage 1. The MSE loss and pyramid loss is used for fine-tuning with \\(512 \\times 512\\) HR patches and a learning rate of 1e-5 for 500k iterations." 
+ }, + { + "type": "image", + "bbox": [ + 0.543, + 0.529, + 0.88, + 0.743 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.537, + 0.761, + 0.881, + 0.775 + ], + "angle": 0, + "content": "Figure 16. Team Pixel Alchemists: RCUNet Architecture." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.803, + 0.585, + 0.818 + ], + "angle": 0, + "content": "4.17.LZ" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.825, + 0.905, + 0.901 + ], + "angle": 0, + "content": "General Method Description. To enhance model complexity without increasing computational overhead, they focus on designing structurally simple yet expressively powerful components, notably through re-parameterization techniques. Drawing inspiration from ECBSR [137]," + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.132, + 0.096, + 0.451, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.234, + 0.235, + 0.372, + 0.246 + ], + "angle": 0, + "content": "(a) Online Reparameterization" + }, + { + "type": "image_caption", + "bbox": [ + 0.099, + 0.264, + 0.475, + 0.278 + ], + "angle": 0, + "content": "Figure 17. Team Pixel Alchemists: Online re-parameterization." + }, + { + "type": "image", + "bbox": [ + 0.103, + 0.334, + 0.434, + 0.464 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.126, + 0.474, + 0.446, + 0.488 + ], + "angle": 0, + "content": "Figure 18. Team LZ: Detailed architecture of TDESR." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.517, + 0.483, + 0.623 + ], + "angle": 0, + "content": "their TDESR framework strategically implements reparameterization to improve super-resolution performance while preserving training efficiency. 
Following the reparameterization phase, they employ tensor decomposition for light-weight network design, where standard \\(3 \\times 3\\) convolutions are factorized into sequential \\(3 \\times 1\\) and \\(1 \\times 3\\) convolutional operations." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.624, + 0.483, + 0.82 + ], + "angle": 0, + "content": "As illustrated in Fig. 18, their architecture comprises five TD Blocks interspersed with three standard \\(3 \\times 3\\) convolutions, implementing a skip connection through elementwise addition between the input features (processed by a \\(3 \\times 3\\) convolution) and intermediate feature maps. The network maintains 64 channels throughout, with tensor decomposition intermediate channels reduced to 32 for computational efficiency. They integrate insights from Swift-SR's parameter-free attention mechanism [112] to enhance feature representation. The final reconstruction stage employs PixelShuffle with 48 input channels for high-quality image upsampling, completing their balanced design of performance and efficiency." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.821, + 0.483, + 0.85 + ], + "angle": 0, + "content": "Training Details. The training details of team LZ are as follows." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.856, + 0.483, + 0.902 + ], + "angle": 0, + "content": "- Base Training (\\( \\times 2 \\) upscaling) The model is initially trained for \\( \\times 2 \\) super-resolution using randomly cropped \\( 96 \\times 96 \\) HR patches with a batch size of 32. They employ" + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.088, + 0.9, + 0.166 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.541, + 0.176, + 0.878, + 0.19 + ], + "angle": 0, + "content": "Figure 19. Team Z6: Network architecture of GloReNet." 
+ }, + { + "type": "text", + "bbox": [ + 0.526, + 0.218, + 0.905, + 0.293 + ], + "angle": 0, + "content": "the Adam optimizer to minimize the L1 loss, starting with an initial learning rate of \\( 1 \\times 10^{-4} \\) that decays via Multi-StepLR scheduler at the mid-training point. The training completes over 100 epochs, utilizing re-parameterization techniques throughout the process." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.293, + 0.905, + 0.399 + ], + "angle": 0, + "content": "- Enhanced Resolution Training. Building upon the \\(\\times 2\\) pretrained weights, this phase increases the HR patch size to \\(128 \\times 128\\) while reducing the batch size to 16. All other hyperparameters (optimizer, learning rate schedule, and re-parameterization) remain consistent with Stage 1. The continued use of L1 loss maintains training stability during this resolution scaling phase." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.399, + 0.905, + 0.519 + ], + "angle": 0, + "content": "- Convolutional Architecture Refinement. They implement standard \\(3 \\times 3\\) convolutional layers in this optimization stage, replacing previous architectural components. The training objective shifts to L2 loss minimization for fine-tuning, while preserving the fundamental network structure and parameter initialization from earlier stages. This transition enhances edge preservation in super-resolved outputs." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.52, + 0.905, + 0.641 + ], + "angle": 0, + "content": "- Tensor Decomposition Optimization. The final refinement employs tensor decomposition techniques with dual loss supervision \\((\\mathrm{L1} + \\mathrm{L2})\\). Training progresses with \\(256 \\times 256\\) HR patches using a reduced batch size of 16 and lower initial learning rate \\((1 \\times 10^{-5})\\). They implement cosine annealing scheduling for smooth convergence, completing the multi-stage optimization process through L2-loss-focused fine-tuning.." 
+ }, + { + "type": "list", + "bbox": [ + 0.514, + 0.293, + 0.905, + 0.641 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.652, + 0.581, + 0.665 + ], + "angle": 0, + "content": "4.18.Z6" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.674, + 0.905, + 0.779 + ], + "angle": 0, + "content": "General Method Description. They introduce a lightweight and efficient image super-resolution (SR) network that leverages both global and local feature attention mechanisms to produce high-quality reconstructions. As depicted in Fig. 19, their network is divided into two main blocks named Global Feature Attention Block (GFAB) and Local Feature Attention Block (LFAB)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.907, + 0.901 + ], + "angle": 0, + "content": "GFAB is designed to capture large-scale context and dependencies across the entire image. Enhances globally significant features, helping the model learn the global information from input images. And LFAB can focus on refining fine-grained details and spatially localized information. Emphasizes subtle textural elements and sharp edges that are critical for upscaling. GFAB utilizes the parameter-free attention module (SPAN [111]) and LFAB uses Effi" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.257 + ], + "angle": 0, + "content": "cient Spatial Attention (ESA) [72] to selectively highlight essential features. And all convolution layers applied reparameterization block [127]. The network begins with a series of convolution layers to extract initial features, which then pass through GFAB units for global attention. Subsequently, the output is processed by LFAB units for local attention, and finally, a PixelShuffle layer upscales the features to the target resolution. 
By combining these two parts, their method effectively preserves global context and local details, achieving a balance between high-quality reconstruction and efficient low computation." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.258, + 0.482, + 0.394 + ], + "angle": 0, + "content": "Training Description. Their training process employs a scratch training stage and a fine-tuning stage. In the first scratch training stage, they use DIV2K datasets for the training dataset. In the fine-tuning stage, they use DIV2K and the first 10K LSDIR datasets for the training dataset. All experiments are carried out in the same experimental environment. The training process is executed using RTX A6000 GPUs. They use the Pytorch 1.13 version for all training steps." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.398, + 0.48, + 0.488 + ], + "angle": 0, + "content": "- Scratch train stage: In the first step, their model is trained from scratch. The LR patches were cropped from LR images with an 8 mini-batch of \\(256 \\times 256\\). Adam optimizer is used with a learning rate of 0.0005 during scratch training. The cosine warm-up scheduler is used. The total number of epochs is set to 2000. They use the \\(l1\\) loss." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.489, + 0.481, + 0.623 + ], + "angle": 0, + "content": "- Fine-tuning stage: In the second step, the model is initialized with the weights trained in the first step. To improve precision, they used the loss method \\( l2 \\) loss. This stage improves the value of the peak signal-to-noise ratio (PSNR) by \\( 0.05 \\sim 0.06 \\) dB. In this step, The LR patches are cropped from LR images with 32 mini-batch \\( 512 \\times 512 \\) sizes. And the initial learning rate is set to 0.00005 and the Adam optimizer is used in conjunction with a cosine warm-up. The total epoch is set to 200 epochs." 
+ }, + { + "type": "list", + "bbox": [ + 0.091, + 0.398, + 0.481, + 0.623 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.636, + 0.216, + 0.65 + ], + "angle": 0, + "content": "4.19. TACO_SR" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.659, + 0.482, + 0.839 + ], + "angle": 0, + "content": "General Method Description. The overall architecture of their network is showed in Fig. 20(a), inspired by SPAN [110] and PFDNLite [91]. Motivated by the design of the Conv3XC module in SPAN, they introduce two additional parallel branches with varying channel expansion ratios, resulting in a novel convolution module termed TenInOneConv, which fuses multiple convolution kernels into a single equivalent kernel to improve inference efficiency. Furthermore, to enhance the model's capability in capturing local texture and detail features, the LocalAttention module, inspired by PFDNLite is integrated, allowing the network to better focus on informative regions within feature maps." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.482, + 0.901 + ], + "angle": 0, + "content": "TenInOneSR employs four TenInOneBlock modules. Each of these blocks (detailed in Fig. 20(b)) begins with a LocalAttention module, which enhancing the network's ability to capture fine details. Subsequently, each block ap" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.151 + ], + "angle": 0, + "content": "plies three cascaded TenInOneConv layers, interleaved with the SiLU activation function, to perform hierarchical feature refinement. The block concludes with a residual connection, allowing better gradient flow." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.153, + 0.905, + 0.274 + ], + "angle": 0, + "content": "Notably, the behavior of the TenInOneConv differs between the training and inference phases. During training (Fig. 20(d)), TenInOneConv operates in a multi-branch configuration. 
It introduces three parallel convolutional branches with different channel expansion ratios (gains set as 1, 2, and 3), along with an additional skip connection. This multi-scale feature extraction enables the network to better aggregate complementary spatial features." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.275, + 0.905, + 0.38 + ], + "angle": 0, + "content": "In the inference stage (Fig. 20(f)), for computational efficiency and faster runtime, these multiple convolution kernels are fused into a single equivalent convolution kernel. Specifically, the parallel branches and skip connection weights are mathematically combined to form one unified \\(3 \\times 3\\) convolutional kernel, significantly accelerating inference without compromising performance." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.397, + 0.905, + 0.668 + ], + "angle": 0, + "content": "Training description. The proposed architecture is trained on two NVIDIA RTX Titan GPUs with a total of 48 GB memory. In the first training stage, the DIV2K dataset is augmented by a factor of \\(85 \\times\\) and registered into the LSDIR format, resulting in a large-scale training set containing 152,991 high-resolution RGB images. During this stage, training is conducted with 64 randomly cropped \\(256 \\times 256\\) patches per batch, using common augmentations such as random flipping and rotation. The model is optimized using the Adam optimizer with L1 loss for a total of 100,000 iterations. The learning rate is initialized at \\(5 \\times 10^{-4}\\) and decayed by half every 20,000 iterations. In the second stage, they keep the training strategy and hyperparameters unchanged, except for increasing the input patch size to \\(384 \\times 384\\) and reducing the batch size to 32 to fit GPU memory. Then another 100,000 training iterations are conducted to further improve the model's performance on higher-resolution textures." 
+ }, + { + "type": "title", + "bbox": [ + 0.513, + 0.682, + 0.633, + 0.696 + ], + "angle": 0, + "content": "4.20.AIOT.AI" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.704, + 0.905, + 0.901 + ], + "angle": 0, + "content": "Method. The overall architecture of their network is shown in Fig. 21(a), inspired by the previous leading methods SPAN[112] and ECBSR[138]. They propose an Efficient channel attention super-resolution network acting on space (ECASNet). Specifically, on the basis of SPAB from SPAN, they combine edge-oriented convolution block (ECB) and regularization module (GCT) to form a new reparameterized feature extraction module named enhanced attention and re-parameterization block(EARB), as shown in Fig. 21(b). In addition, unlike SPAN, they find that using channel attention after feature map concatenating can significantly improve performance. For the sake of lightweight design, they use an efficient channel attention" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.507, + 0.937 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.091, + 0.086, + 0.331, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.346, + 0.087, + 0.59, + 0.354 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.087, + 0.905, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.346, + 0.354, + 0.59, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.618, + 0.329, + 0.911, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.286, + 0.568, + 0.712, + 0.584 + ], + "angle": 0, + "content": "Figure 20. Team TACO_SR: The architecture of proposed TenInOneSR." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.609, + 0.483, + 0.64 + ], + "angle": 0, + "content": "module, called the efficient channel attention module which acts on space(CAS), as shown in Fig. 21(c)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.64, + 0.484, + 0.807 + ], + "angle": 0, + "content": "Training Detail. The datasets used for training include DIV2K and LSDIR. Imitating the previous method, the training process is divided into two stages. In the first stage, they randomly crop \\(256 \\times 256\\) HR image blocks from the ground truth image, batch is 16, and randomly flipped and rotated them. Using Adam optimizer, set \\(\\beta 1 = 0.9\\) and \\(\\beta 2 = 0.999\\), and minimize L1 loss function. The initial learning rate is set to 5e-4, and the cosine learning rate attenuation strategy is adopted. Epoch is set to 200. In the second stage, they changed the loss function to L2, and other settings are the same as those in the first stage." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.818, + 0.2, + 0.833 + ], + "angle": 0, + "content": "4.21.JNU620" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.485, + 0.903 + ], + "angle": 0, + "content": "General Method Description. They propose a reparameterized residual local feature network (RepRLFN) for efficient image super-resolution, which is influenced by existing studies such as RepRFN [19] and RLFN [55]. Fig. 22" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.609, + 0.907, + 0.64 + ], + "angle": 0, + "content": "illustrates the overall architecture of RepRLFN, which has been extensively validated in previous studies." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.641, + 0.907, + 0.791 + ], + "angle": 0, + "content": "They replace the RLFB in RLFN [55] with their reparameterized residual local feature block (RepRLFB). 
RepBlock is the main component of RepRLFB, which employs multiple parallel branch structures to extract the features of different receptive fields and modes to improve performance. At the same time, the structural re-parameterization technology is leveraged to decouple the training and inference phases to avoid the problem that computational complexity increases caused by the introduction of multi-branch." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.794, + 0.909, + 0.839 + ], + "angle": 0, + "content": "Training Strategy. The proposed RepRLFN consists of 4 RepRLFBs, with the number of feature channels set to 48. The details of the training steps are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.84, + 0.909, + 0.902 + ], + "angle": 0, + "content": "1. In the first stage, the model is pre-trained on DIV2K [4]. HR patches of size \\(480 \\times 480\\) are randomly cropped from HR images, and the mini-batch size is set to 32. The model is trained by minimizing the L1 loss function" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.103, + 0.089, + 0.87, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.432, + 0.215, + 0.543, + 0.233 + ], + "angle": 0, + "content": "(b) ECASNet" + }, + { + "type": "image", + "bbox": [ + 0.092, + 0.256, + 0.48, + 0.369 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.209, + 0.362, + 0.286, + 0.377 + ], + "angle": 0, + "content": "(b) EARB" + }, + { + "type": "image", + "bbox": [ + 0.168, + 0.409, + 0.362, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.209, + 0.493, + 0.272, + 0.51 + ], + "angle": 0, + "content": "(c) CAS" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.258, + 0.908, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": 
"image_caption", + "bbox": [ + 0.659, + 0.493, + 0.769, + 0.511 + ], + "angle": 0, + "content": "(d) RepConv" + }, + { + "type": "image_caption", + "bbox": [ + 0.274, + 0.53, + 0.722, + 0.545 + ], + "angle": 0, + "content": "Figure 21. Team AIOT.AI: Detailed architecture of the proposed ECASNet." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.571, + 0.483, + 0.616 + ], + "angle": 0, + "content": "using the Adam optimizer. The initial learning rate is set to 5e-4 and is halved every 200 epochs. The total number of epochs is 800." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.621, + 0.483, + 0.741 + ], + "angle": 0, + "content": "2. In the second stage, the model is fine-tuned on 3450 images from DIV2K [4] and Flickr2k [101] (DF2K) and the first 10k images from LSDIR [64]. HR patches of size \\(640 \\times 640\\) are randomly cropped from HR images, and the mini-batch size is set to 32. The model is fine-tuned by minimizing the L2 loss function. The initial learning rate is set to 2e-4 and is halved every 5 epochs. The total number of epochs is 25." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.746, + 0.483, + 0.852 + ], + "angle": 0, + "content": "3. In the third stage, the model is fine-tuned again on 3450 images from DF2K and the first 10k images from LSDIR [64]. The HR patch size and minibatch size are set to \\(640 \\times 640\\) and 32, respectively. The model is fine-tuned by minimizing the L2 loss function. The initial learning rate is set to 1e-4 and is halved every 5 epochs. The total number of epochs is 20." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.856, + 0.484, + 0.901 + ], + "angle": 0, + "content": "4. In the fourth stage, the model is fine-tuned on 3450 images from DF2K and the first \\(10\\mathrm{k}\\) images from LSDIR [64]. 
The HR patch size and minibatch size are set" + }, + { + "type": "list", + "bbox": [ + 0.09, + 0.621, + 0.484, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.571, + 0.907, + 0.662 + ], + "angle": 0, + "content": "to \\(640 \\times 640\\) and 32, respectively. The model is fine-tuned by minimizing the L2 loss function. The learning rate is set to 5e-5, and the total number of epochs is 10. To prevent over-fitting, the model ensemble via stochastic weight averaging [46] (SWA) is performed during the last 8 epochs to obtain the final model for testing." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.68, + 0.689, + 0.696 + ], + "angle": 0, + "content": "4.22. LVGroup_HFUT" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.704, + 0.907, + 0.901 + ], + "angle": 0, + "content": "General Method Description. The Swift Parameter-free Attention Network (SPAN) [112] introduces a novel parameter-free attention mechanism to address the tradeoff between performance and computational complexity, as shown in 23. SPAN employs symmetric activation functions (e.g., shifted Sigmoid) applied to convolutional layer outputs to generate attention maps without learnable parameters, enhancing high-contribution features while suppressing redundant information. Residual connections within each Swift Parameter-free Attention Block (SPAB) mitigate information loss and preserve low-level features. The lightweight architecture with cascaded SPABs achieves fast inference by avoiding parameter-heavy attention computa" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.104, + 0.09, + 0.898, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.303, + 0.484, + 0.696, + 0.499 + ], + "angle": 0, + "content": "Figure 22. 
Team JNU620: The network architecture of RepRLFN
This section will start by introducing the overall architecture of SGSDN and then explain the SGM and ESD in detail." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.747, + 0.907, + 0.822 + ], + "angle": 0, + "content": "Network Architecture The overall structure of the SGSDN is shown in Fig. 24. It consists of three stages: shallow feature extraction, deep feature extraction, and image reconstruction. First, they use a \\(3 \\times 3\\) convolutional layer to extract shallow features, which is expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.633, + 0.831, + 0.905, + 0.848 + ], + "angle": 0, + "content": "\\[\n\\mathbf {I} _ {s} = F _ {\\text {C o n v 3} \\times 3} (\\mathbf {I} _ {L R}), \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.856, + 0.907, + 0.901 + ], + "angle": 0, + "content": "where, \\( F_{Conv3 \\times 3} \\) represents the shallow feature extraction module using a \\( 3 \\times 3 \\) convolutional layer. The obtained shallow feature is denoted as \\( \\mathbf{I}_s \\). Subsequently, the extracted" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.154, + 0.09, + 0.422, + 0.208 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.222, + 0.483, + 0.263 + ], + "angle": 0, + "content": "Figure 25. Team YG: The details of each component. (a) SGM: Spatial-gate modulation module; (b) ESD: Enhanced self-distillation module." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.29, + 0.484, + 0.334 + ], + "angle": 0, + "content": "shallow features are fed to several stacked SGSDBs to produce deep representative features, This process can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.163, + 0.344, + 0.483, + 0.362 + ], + "angle": 0, + "content": "\\[\n\\mathbf {I} _ {k} = F _ {S G S D B} ^ {k} \\left(\\mathbf {I} _ {k - 1}\\right), k = 1, \\dots , n, \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.372, + 0.484, + 0.448 + ], + "angle": 0, + "content": "where, \\( F_{SGSDB}^{k}(\\cdot) \\) represents the \\( k \\)-th SGSDB, \\( \\mathbf{I}_{k-1} \\) and \\( \\mathbf{I}_k \\) denote the input and output features of the \\( k \\)-th SGSDB, respectively. Each SGSDB consists of three SGMs and an ESD. Given an input feature \\( \\mathbf{I}_t \\), the mapping process of SGSDB can be represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.208, + 0.458, + 0.482, + 0.531 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbf {I} _ {d _ {1}} = F _ {S G M} (\\mathbf {I} _ {t}), \\\\ \\mathbf {I} _ {d _ {2}} = F _ {S G M} (\\mathbf {I} _ {d _ {1}}), \\\\ \\mathbf {I} _ {d _ {3}} = F _ {S G M} \\left(\\mathbf {I} _ {d _ {2}}\\right) + \\mathbf {I} _ {t}, \\\\ \\mathbf {I} _ {o} = F _ {E S D} (\\mathbf {I} _ {d _ {3}}) + \\mathbf {I} _ {d _ {3}} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.54, + 0.484, + 0.661 + ], + "angle": 0, + "content": "where, \\( F_{SGM} \\) represents the SGM, \\( F_{ESD} \\) represents the ESD. After the deep feature extraction block, the representative features are processed by a \\( 3 \\times 3 \\) standard convolution layer and a pixel shuffle operation [94] to reconstruct the high-quality SR image. To take advantage of high-frequency information, they insert a long-distance residual connection before the image reconstruction module. 
The reconstruction stage is described as follows" + }, + { + "type": "equation", + "bbox": [ + 0.128, + 0.672, + 0.483, + 0.688 + ], + "angle": 0, + "content": "\\[\n\\mathbf {I} _ {S R} = F _ {\\text {P i x e l S h u f f l e}} \\left(F _ {\\text {C o n v 3} \\times 3} \\left(\\mathbf {I} _ {d} + \\mathbf {I} _ {s}\\right)\\right), \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.698, + 0.483, + 0.759 + ], + "angle": 0, + "content": "where \\(\\mathbf{I}_d\\) denotes the deep feature obtained by the stacked SGSDBs, and \\(F_{Conv3\\times 3}(\\cdot)\\) indicates the \\(3\\times 3\\) standard convolution layer. \\(F_{PixelShuffle}(\\cdot)\\) is used to upscale the final feature and output the SR reconstructed image \\(\\mathbf{I}_{SR}\\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.759, + 0.484, + 0.819 + ], + "angle": 0, + "content": "Finally, to train the network, they use the \\(L_{1}\\) loss function to minimize the pixel-level difference between the ground truth image \\(\\mathbf{I}_{GT}\\) and the reconstructed image \\(\\mathbf{I}_{SR}\\), which can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.213, + 0.829, + 0.483, + 0.847 + ], + "angle": 0, + "content": "\\[\nL _ {1} = \\left\\| \\mathbf {I} _ {S R} - \\mathbf {I} _ {G T} \\right\\| _ {1}, \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.856, + 0.484, + 0.902 + ], + "angle": 0, + "content": "At the same time, they notice that only using the pixelwise loss function can not effectively generate more high-frequency details [15]. Thus, they accordingly employ a" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.905, + 0.122 + ], + "angle": 0, + "content": "frequency constraint to regularize network training. 
The adopted loss function for the network training is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.588, + 0.129, + 0.905, + 0.145 + ], + "angle": 0, + "content": "\\[\nL = L _ {1} + \\lambda \\| \\mathcal {F} (\\mathbf {I} _ {S R}) - \\mathcal {F} (\\mathbf {I} _ {G T}) \\|. \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.153, + 0.905, + 0.182 + ], + "angle": 0, + "content": "where \\(\\mathcal{F}\\) represents the Fast Fourier Transform, and \\(\\lambda\\) is a weight parameter which is empirically set to 0.1." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.183, + 0.907, + 0.378 + ], + "angle": 0, + "content": "Spatial-gate modulation module Considering that the reason why the ViT-based model performs well is that SA explores non-local information and expands the effective receptive field of the model. They develop a lightweight spatial-gate modulation (SGM) module to collaboratively extract representative features, where the SAL branch exploits non-local features in a larger receptive field by integrating the dilated depth-wise convolutional layers with horizontal and vertical 1-D kernels, and the LKG branch captures local features in parallel. Moreover, to avoid potential block artifacts aroused by dilation, they adopt the gate mechanism to recalibrate the generated feature maps adaptively, as shown in Fig. 25." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.378, + 0.906, + 0.454 + ], + "angle": 0, + "content": "Given the input feature \\(\\mathbf{I}_{in} \\in R^{C \\times H \\times W}\\), where \\(H \\times W\\) denotes the spatial size and \\(C\\) is the number of channels. Specifically, they first apply a normalization layer and a point-by-point convolution to normalize information and expand the channel." 
+ }, + { + "type": "equation", + "bbox": [ + 0.606, + 0.462, + 0.905, + 0.478 + ], + "angle": 0, + "content": "\\[\n\\mathbf {I} _ {1} = F _ {\\text {C o n v 1} \\times 1} \\left(F _ {\\text {N o r m}} \\left(\\mathbf {I} _ {\\text {i n}}\\right)\\right), \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.484, + 0.906, + 0.56 + ], + "angle": 0, + "content": "where, \\( F_{Norm} \\) represents the \\( L_2 \\) normalization and \\( F_{Conv1\\times 1} \\) denotes a \\( 1\\times 1 \\) convolutional layer, \\( \\mathbf{I}_1\\in R^{2C\\times H\\times W} \\). Subsequently, the obtained features \\( \\mathbf{I}_1 \\) are split into two parts along the channel dimension, this process can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.634, + 0.567, + 0.905, + 0.584 + ], + "angle": 0, + "content": "\\[\n\\mathbf {I} _ {x}, \\mathbf {I} _ {y} = F _ {S} \\left(F _ {G} \\left(\\mathbf {I} _ {1}\\right)\\right), \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.59, + 0.906, + 0.756 + ], + "angle": 0, + "content": "where \\( F_{G} \\) denotes the GELU activation function [38], \\( F_{S} \\) denotes a channel splitting operation, \\( \\mathbf{I}_x \\in R^{C \\times H \\times W} \\) and \\( \\mathbf{I}_y \\in R^{C \\times H \\times W} \\). They then process the features \\( \\mathbf{I}_x \\) and \\( \\mathbf{I}_y \\) in parallel via the SAL and LKG branches, producing the non-local feature \\( \\mathbf{I}_n \\) and local feature \\( \\mathbf{I}_l \\), respectively. It is worth mentioning that the SAL and LKG branches only need to be responsible for half the input signals, and the parallel processing is faster. Finally, they fuse the non-local feature \\( \\mathbf{I}_n \\) and local feature \\( \\mathbf{I}_l \\) together with channel concatenation to form a representative output of the SGM module. 
This process can be expressed as," + }, + { + "type": "equation", + "bbox": [ + 0.638, + 0.763, + 0.905, + 0.78 + ], + "angle": 0, + "content": "\\[\n\\mathbf {I} _ {S G M} = F _ {C} \\left(\\mathbf {I} _ {n}, \\mathbf {I} _ {l}\\right), \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.787, + 0.905, + 0.816 + ], + "angle": 0, + "content": "where, \\(\\mathbf{I}_{DSG}\\) is the output feature and \\(F_{C}(\\cdot)\\) is the channel cascade operation." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.817, + 0.905, + 0.861 + ], + "angle": 0, + "content": "SA-like branch They exploit non-local features in a larger receptive field by integrating the dilated depth-wise convolutional layers with horizontal and vertical 1-D kernels." + }, + { + "type": "equation", + "bbox": [ + 0.575, + 0.869, + 0.905, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbf {I} _ {o} = F _ {D ^ {3} W C o n v 5 \\times 1 1} \\left(F _ {D W C o n v 5 \\times 1} \\right. \\tag {18} \\\\ \\left(F _ {D ^ {3} W C o n v 1 \\times 1 1} \\left(F _ {D W C o n v 1 \\times 5} (\\mathbf {I} _ {m})\\right)\\right)) \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.091, + 0.485, + 0.381 + ], + "angle": 0, + "content": "where \\( F_{DWConv1 \\times 5}(\\cdot) \\) denotes the DWConv layer with a kernel of size \\( 1 \\times 5 \\), \\( F_{D^3 WConv1 \\times 11}(\\cdot) \\) signifies the DWConv layer with a kernel of size \\( 1 \\times 11 \\) and the dilated factor is set to 3, \\( F_{DWConv5 \\times 1}(\\cdot) \\) denotes the DWConv layer with a kernel of size \\( 5 \\times 1 \\), \\( F_{D^3 WConv11 \\times 1}(\\cdot) \\) signifies the DWConv layer with a kernel of size \\( 11 \\times 1 \\) and the dilated factor is set to 3. 
Increasing the convolution kernel directly will greatly increase
Finally, they use a \\(1 \\times 1\\) convolution with a GELU activation to distill the output features for extracting the representative detail information \\(\\mathbf{I}_l\\), which is achieved by," + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.612, + 0.482, + 0.636 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbf {I} _ {o} = F _ {D W C o n v 3 \\times 3} (\\mathbf {I} _ {x}) * \\mathbf {I} _ {y}, \\\\ \\mathbf {I} _ {o} = F _ {D W C o n v 3 \\times 3} (\\mathbf {I} _ {x}). \\end{array} \\tag {20}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.631, + 0.358, + 0.647 + ], + "angle": 0, + "content": "\\[\n\\mathbf {I} _ {l} = F _ {G} \\left(F _ {\\text {C o n v 1} \\times 1} \\left(\\mathbf {I} _ {o}\\right)\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.659, + 0.483, + 0.703 + ], + "angle": 0, + "content": "where \\( F_{DWConv3 \\times 3}(\\cdot) \\) denotes the DWConv layer with a kernel of size \\( 3 \\times 3 \\), \\( F_{G} \\) represents GELU activation function." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.705, + 0.484, + 0.871 + ], + "angle": 0, + "content": "Enhanced self-distillation module They present an enhanced self-distillation (ESD) module to expand and refine the features derived from the SGM in spatial and channel dimensions further. The ESD uses a \\(3 \\times 3\\) depth-wise convolutional to expand spatial and channel information. Then they use the GLUE activation function to introduce nonlinearity and extend the representation of the network. Finally, the output features are fed into a \\(1 \\times 1\\) convolution for further feature mixing and reducing the hidden channel back to the original input dimension. 
Given the input feature \\(\\mathbf{I}_{in} \\in R^{C \\times H \\times W}\\), this process can be formulated as," + }, + { + "type": "equation", + "bbox": [ + 0.148, + 0.885, + 0.483, + 0.903 + ], + "angle": 0, + "content": "\\[\n\\mathbf {I} _ {l} = F _ {\\text {C o n v 1} \\times 1} \\left(F _ {G} \\left(F _ {\\text {D W C o n v 3} \\times 3} \\left(\\mathbf {I} _ {i n}\\right)\\right)\\right) \\tag {21}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.198 + ], + "angle": 0, + "content": "Training Details. Following previous works [66], they use the DF2K dataset, which consists of 800 images from DIV2K [4] and 2650 images from Flickr2K [70] as the training dataset. A sliding window slicing operation is used to decompose each HR image into \\(480 \\times 480\\) patches for training. The LR images are obtained by downsampling the HR images using the MATLAB bicubic kernel function." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.199, + 0.906, + 0.41 + ], + "angle": 0, + "content": "During the training, random rotation and horizontal flipping are used for data augmentation. The proposed SGSDN has 8 SGSDBs, in which the number of feature channels is set to 24. They start by pretraining the model on the DIV2K and Flickr2K datasets. The mini-batch size is set to 64. The model is trained by the ADAN optimizer [124] with \\(\\beta_{1} = 0.98\\), \\(\\beta_{2} = 0.92\\) and \\(\\beta_{3} = 0.99\\), and the exponential moving average (EMA) is set to 0.999 to stabilize training. The initial and minimum learning rates are set to \\(5 \\times 10^{-3}\\) and \\(1 \\times 10^{-6}\\), respectively, and decay according to cosine learning rate. The model is optimized using a combination of the \\(L_{1}\\) loss and an FFT-based frequency loss function [15] for a total of \\(1 \\times 10^{6}\\) iterations. The size of the randomly cropped LR patches is \\(64 \\times 64\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.411, + 0.906, + 0.532 + ], + "angle": 0, + "content": "They then conduct fine-tuning on the DIV2K dataset and the first 10k images from LSDIR [64]. The input size is set to \\(96 \\times 96\\), with a batch size of 32. The fine-tuning process optimizes the model by starting with an initial learning rate of \\(3 \\times 10^{-3}\\), while keeping the rest consistent with pretraining. The fine-tuning phase encompasses a total of 100k iterations. They implemented our model on an NVIDIA RTX 3090 GPU using Pytorch." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.544, + 0.625, + 0.558 + ], + "angle": 0, + "content": "4.24. NanoSR" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.567, + 0.906, + 0.718 + ], + "angle": 0, + "content": "Network Architecture. Their network architecture is inspired by SPAN [112] and PAN [142]. While maintaining the overall design of SPAN, they replace the SPAB block with the RepBlock. The RepBlock consists of a feature extractor using reparameterized convolution and a reparameterized pixel attention module. During training, the RepBlock operates in a complex mode to achieve better quality performance but can be equivalently transformed into a simple mode with fewer parameters and FLOPs. The detailed network architecture is illustrated in Fig. 26." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.719, + 0.906, + 0.84 + ], + "angle": 0, + "content": "Reparameterized Convolution. Reparameterized convolution plays a crucial role in improving the performance of efficient CNN-based super-resolution networks. They employ the RepMBCov introduced in PlainUSR [120], and this RepMBCov forms all the convolutions in the RepBlock. In addition, RepMBCov is derived from MobileNetV3 [39] Block (MBConv). The architecture of RepMBCov is depicted in Fig. 27." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.906, + 0.902 + ], + "angle": 0, + "content": "Implementation Details. 
They train the model using all 85,791 image pairs from the DIV2K and LSDIR datasets. Each image pair is cropped into \\(480 \\times 480\\) sub-patches for training. During each training batch, 64 HR RGB patches" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.144, + 0.118, + 0.885, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.304, + 0.447, + 0.695, + 0.461 + ], + "angle": 0, + "content": "Figure 26. Team NanoSR: The network architecture of RepRLFN" + }, + { + "type": "image", + "bbox": [ + 0.166, + 0.507, + 0.411, + 0.739 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.092, + 0.763, + 0.483, + 0.779 + ], + "angle": 0, + "content": "Figure 27. Team NanoSR: The network architecture of RepRLFN" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.81, + 0.484, + 0.903 + ], + "angle": 0, + "content": "of size \\(128 \\times 128\\) are randomly cropped and augmented with random flipping and rotation. The optimization objective is the \\(\\ell_1\\) loss, and they use the AdamW optimizer (\\(\\beta_{1} = 0.9\\), \\(\\beta_{2} = 0.99\\)) to train NanoSR. The learning rate is initialized at \\(5 \\times 10^{-4}\\) and halved at \\(\\{250\\mathrm{k}, 400\\mathrm{k}, 450\\mathrm{k}, 475\\mathrm{k}\\}\\) iterations within a total of \\(500\\mathrm{k}\\) iterations. The proposed" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.488, + 0.907, + 0.518 + ], + "angle": 0, + "content": "method is implemented using the PyTorch framework on a single NVIDIA RTX 4090 GPU." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.587, + 0.757, + 0.604 + ], + "angle": 0, + "content": "4.25. MegastudyEdu_Vision.AI" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.629, + 0.907, + 0.903 + ], + "angle": 0, + "content": "General Method Description. 
To effectively model long-range dependency and extensive receptive field, inspired by CFSR [122], they propose the multi-scale aggregation attention network (MAAN), as illustrated in Fig. 28. MAAN reconstructs high-quality images through a shallow feature extractor, a stack of three residual multi-scale aggregation blocks (RMAB) composed of multi-scale aggregation attention layers (MAAL), a large separable kernel attention tail (LSKAT), and an image reconstruction module. Specially, MAAL captures global and local details via a multi-scale mixer and efficient feed-forward network (EFN) [122]. Given a low-resolution input image \\( I_{LR} \\in \\mathbb{R}^{3 \\times H \\times W} \\), shallow features such as edges, textures, and fine details are extracted using a \\( 3 \\times 3 \\) convolution in the shallow feature extraction stage and passed to the MAAL. As shown in Fig. 28, the MAAL processing pipeline begins with an input \\( X \\), applying layer normalization, followed by a \\( 1 \\times 1 \\) convolution and splitting the feature map into four groups" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.099, + 0.089, + 0.896, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.204, + 0.336, + 0.793, + 0.351 + ], + "angle": 0, + "content": "Figure 28. Team MegastudyEdu_Vision.AI: Overview of multi-scale aggregation attention network." 
+ }, + { + "type": "text", + "bbox": [ + 0.091, + 0.378, + 0.289, + 0.392 + ], + "angle": 0, + "content": "along the channel dimension:" + }, + { + "type": "equation", + "bbox": [ + 0.262, + 0.403, + 0.393, + 0.418 + ], + "angle": 0, + "content": "\\[\nV = \\operatorname {C o n v} _ {1 \\times 1} (X),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.24, + 0.422, + 0.482, + 0.445 + ], + "angle": 0, + "content": "\\[\nF _ {\\text {g a t e}} = \\operatorname {C o n v} _ {1 \\times 1} (X), \\tag {22}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.103, + 0.441, + 0.445, + 0.476 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} F _ {i d}, F _ {g a t e 1}, F _ {g a t e 2}, F _ {g a t e 3} = \\operatorname {S p l i t} (F _ {g a t e}), \\\\ = F _ {: g}, F _ {g: 2 g}, F _ {2 g: 3 g}, F _ {3 g:}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.484, + 0.484, + 0.576 + ], + "angle": 0, + "content": "Here, \\( F_{id} \\) is the identity mapping without channel modification. The channel count used in convolution branches, denoted as \\( g \\), is determined by a ratio \\( r_g \\), computed as \\( g = r_g C \\). They set \\( r_g \\) to 0.25. 
Subsequently, each branch is processed using large separable kernel (LSK), inspired by large separable kernel attention (LSKA) [57]:" + }, + { + "type": "equation", + "bbox": [ + 0.215, + 0.587, + 0.289, + 0.605 + ], + "angle": 0, + "content": "\\[\nF _ {i d} ^ {\\prime} = F _ {i d},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.196, + 0.609, + 0.482, + 0.636 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{c} F _ {g a t e 1} ^ {\\prime} = L S K _ {1 1, 2} \\left(F _ {g a t e 1}\\right), \\\\ \\end{array} \\tag {23}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.196, + 0.631, + 0.38, + 0.65 + ], + "angle": 0, + "content": "\\[\nF _ {g a t e 2} ^ {\\prime} = L S K _ {2 3, 3} \\left(F _ {g a t e 2}\\right),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.196, + 0.654, + 0.38, + 0.673 + ], + "angle": 0, + "content": "\\[\nF _ {g a t e 3} ^ {\\prime} = L S K _ {3 5, 3} \\left(F _ {g a t e 3}\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.677, + 0.483, + 0.767 + ], + "angle": 0, + "content": "where \\(LSK_{k,d}\\) indicates the kernel size \\(k\\) and dilation factor \\(d\\). Each LSK is composed of consecutive \\(1 \\times k\\) depth-wise convolution, \\(k \\times 1\\) depth-wise convolution, \\(1 \\times k\\) dilated depth-wise convolution, and \\(k \\times 1\\) dilated depth-wise convolution. The distinct kernel sizes and dilation factors across branches effectively handle multi-scale features." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.769, + 0.484, + 0.829 + ], + "angle": 0, + "content": "After concatenating the outputs from each branch, the combined result is integrated with \\( V \\) through an element-wise product. 
Subsequently, \\( 1 \\times 1 \\) convolution is applied to obtain the final output as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.091, + 0.838, + 0.485, + 0.87 + ], + "angle": 0, + "content": "\\[\nF _ {o u t} = \\operatorname {C o n v} _ {1 \\times 1} \\left(V \\odot \\operatorname {C o n c a t} \\left(F _ {i d} ^ {\\prime}, F _ {\\text {g a t e} 1} ^ {\\prime}, F _ {\\text {g a t e} 2} ^ {\\prime}, F _ {\\text {g a t e} 3} ^ {\\prime}\\right)\\right) \\tag {24}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.902 + ], + "angle": 0, + "content": "This \\(F_{out}\\) is then fed into EFN [122]. For further EFN details, refer to CFSR [122]." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.377, + 0.905, + 0.468 + ], + "angle": 0, + "content": "While CFSR [122] employs a \\(3 \\times 3\\) convolution tail for deep feature extraction, it has limitations in establishing long-range connections, restricting the representational capability of reconstructed features. To overcome this, they propose LSKAT inspired by the large kernel attention tail(LKAT) [119], as depicted in Fig. 28." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.469, + 0.907, + 0.649 + ], + "angle": 0, + "content": "Training Details. Their approach leverages DIV2K[103], Flickr2K[70], and the first 10K portion of LSDIR[64]. In each RMAB, the number of channels, RMABs, and MAALs are set to 48, 3, and 2-3-2, respectively. During training, they used 256 HR RGB patches with a batch size of 64. Data augmentation included random flips and rotations. Parameters are optimized using the L1 loss and the Adam optimizer[54]. The learning rate started at \\(1 \\times 10^{-3}\\) and decreasing to \\(1 \\times 10^{-6}\\) using a cosine annealing scheduler. The network is trained for 1,000K iterations, implemented in PyTorch, and executed on an NVIDIA RTX 3090 GPU." 
+ }, + { + "type": "title", + "bbox": [ + 0.514, + 0.665, + 0.608, + 0.679 + ], + "angle": 0, + "content": "4.26.MILA" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.688, + 0.906, + 0.854 + ], + "angle": 0, + "content": "General Method Description. As shown in Figure 29, inspired by the efficient approximation of self-attention (EASA) [144], they introduce local variance and design LVSA. Additionally, inspired by MDRN [81] and AGDN [114], they consider the impact of multi-level branches on performance. Therefore, they design a multi-level variance feature modulation block that incorporates non-local information with local variance perception at two different levels. This design aims to better leverage the interplay between local and non-local features while balancing performance and model complexity." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.856, + 0.906, + 0.902 + ], + "angle": 0, + "content": "The gated-dconv feed-forward network (GDFN) [132] introduces gating mechanism and depth-wise convolutions to encode information from spatially adjacent pixel posi" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.168 + ], + "angle": 0, + "content": "tions, which is highly useful for learning local image structures to achieve effective restoration. However, the single gating structure is relatively simple and cannot effectively capture and blend local contextual information. Therefore, they propose the symmetric gated feed-forward network." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.177, + 0.483, + 0.223 + ], + "angle": 0, + "content": "Training Description. The proposed MVFMNet has 6 FMMs, in which the number of feature channels is set to 26. The details of the training steps are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.232, + 0.482, + 0.352 + ], + "angle": 0, + "content": "1. 
Pretraining on the DF2K and the first 1k images of LSDIR datasets. HR patches of size \\(256 \\times 256\\) are randomly cropped from HR images, and the mini-batch size is set to 64. The model is trained by minimizing L1 loss and the frequency loss [14] with Adam optimizer for total 100k iterations. They set the initial learning rate to \\(1 \\times 10^{-3}\\) and the minimum one to \\(1 \\times 10^{-6}\\), which is updated by the Cosine Annealing scheme [78]." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.353, + 0.482, + 0.444 + ], + "angle": 0, + "content": "2. Finetuning on the DF2K and the first 1k images of LSDIR datasets. HR patch size and mini-batch size are set to \\(256 \\times 256\\) and 64, respectively. The model is fine-tuned by minimizing the L2 loss function. The learning rate is initialized at \\(2 \\times 10^{-5}\\) and gradually decreased to \\(1 \\times 10^{-8}\\) over 500k iterations using the Cosine Annealing scheme." + }, + { + "type": "list", + "bbox": [ + 0.085, + 0.232, + 0.482, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.482, + 0.212, + 0.496 + ], + "angle": 0, + "content": "4.27. AiMF_SR" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.513, + 0.484, + 0.71 + ], + "angle": 0, + "content": "Method Details. They propose a novel Mixture of Efficient Attention (MoEA) architecture for efficient superresolution tasks. The architecture includes a shallow feature extractor, multiple Feature Representation Modules (FRMs), and an efficient reconstruction and upsampling module. Initially, a shallow \\(3 \\times 3\\) convolutional layer reduces computational load, generating compact feature representations. Deep feature extraction employs transformer-inspired blocks with pre-normalization, incorporating Mixture-of-Experts (MoE) Blocks [131] for efficient attention and Depth Feed Forward Networks (DepthFFN) for capturing depth-wise interactions. 
Details of the architecture can be seen in Fig. 30." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.72, + 0.484, + 0.901 + ], + "angle": 0, + "content": "The MoEBlock consists of two parallel feature pathways (Fig. 30). The input features \\( x \\) are first projected into two distinct feature sets \\( x_{a} \\) and \\( x_{b} \\) using a pointwise convolution. The first branch, \\( x_{a} \\), undergoes both adaptive average and max pooling followed by depth-wise convolutions. The pooling is done in scale of 8 [145]. These pooling layers followed by depth-wise convolutions serve as efficient attention-like mechanism. Then, it combines these features through element-wise addition, nonlinear activation (GELU), and interpolation. The second branch, \\( x_{b} \\), is processed via depth-wise and pointwise convolutions with GELU activation." + }, + { + "type": "equation", + "bbox": [ + 0.516, + 0.128, + 0.912, + 0.233 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} x _ {a} = \\operatorname {D W C o n v} \\left(\\operatorname {A v g P o o l} \\left(x _ {a}\\right)\\right) + \\operatorname {D W C o n v} \\left(\\operatorname {M a x P o o l} \\left(x _ {a}\\right)\\right), \\\\ x _ {a} ^ {\\prime} = \\mathcal {U} (\\mathcal {G} (\\operatorname {P W C o n v} (x _ {a}))), \\\\ x _ {a} ^ {\\prime} = \\operatorname {P W C o n v} \\left(x _ {a} ^ {\\prime}\\right), \\\\ x _ {b} ^ {\\prime} = \\mathcal {G} (\\operatorname {P W C o n v} (\\operatorname {D W C o n v} (x _ {b}))), \\\\ x _ {a b} = \\mathcal {C} \\left(x _ {a} ^ {\\prime}, x _ {b} ^ {\\prime}\\right). \\tag {25} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.238, + 0.905, + 0.344 + ], + "angle": 0, + "content": "where \\( x_{a}, x_{b} \\) are concatenated and passed through the Router (gating network), \\( \\mathcal{R} \\), which adaptively selects the top- \\( k \\) expert paths based on the channel-wise global average-pooled features in the MoE-layer. 
Each selected expert independently processes \\( x_{a}' \\) and \\( x_{b}' \\) through pointwise convolutions, multiplies them element-wise, and applies a final convolution for feature integration:" + }, + { + "type": "equation", + "bbox": [ + 0.517, + 0.386, + 0.904, + 0.452 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\operatorname {l o g i t s} = \\mathcal {R} (x _ {a b}), \\\\ x _ {a} ^ {\\prime}, x _ {b} ^ {\\prime} = \\operatorname {T o p K} (\\operatorname {S o f t m a x} (\\operatorname {l o g i t s})) \\\\ \\operatorname {E x p e r t} \\left(x _ {a} ^ {\\prime}, x _ {b} ^ {\\prime}\\right) = \\operatorname {P W C o n v} \\left[ \\operatorname {P W C o n v} \\left(x _ {a} ^ {\\prime}\\right) \\times \\operatorname {P W C o n v} \\left(x _ {b} ^ {\\prime}\\right) \\right] \\tag {26} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.458, + 0.906, + 0.623 + ], + "angle": 0, + "content": "Multiple FRMs (LayerNorm-MoEBlock-LayerNorm-DepthFFN sequences) are stacked for deep feature extraction. For reconstruction, global contextual features from deep extraction combine with shallow features via residual connections, followed by PixelShuffle-based upsampling to produce high-resolution outputs. The model uses GELU activation, Layer Normalization. Their MoE layer dynamically routes features across numExperts \\(= 3\\), selecting the top \\(k = 1\\) experts at training time, allowing a flexible and adaptive processing pipeline tailored specifically to input feature characteristics." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.629, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Training Strategy. The model is trained and tested on BasicSR [115] setting. First, the model is initially trained on DIV2K_LSDIR_x2, then further finetuned with DIV2K_LSDIR_x3 dataset for 500,000 iterations respectively, in which these scales are made with bicubic downsampling. 
The x4 scale model is finetuned on top of the x3 model over 500,000 iterations with the initial learning rate of \\(1 \\times 10^{-3}\\) using the Adam optimizer. The learning rate decayed at iterations [250,000, 400,000, 450,000, 475,000]. The training pipeline included data augmentations such as random horizontal flips, vertical flips and rotations. The model is optimized using L1 Loss and Fast Fourier Transform (FFT) Loss [95] with 1.0 and 0.1 weights, respectively. All reported implementations are carried out using Python (version 3.9) programming language and PyTorch Framework, utilizing one RTX4090, 24GB VRAM and 16-core CPU. Training is conducted over approximately 23 days with a single GPU of batch size of 16." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.101, + 0.09, + 0.899, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.1, + 0.201, + 0.899, + 0.349 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.1, + 0.35, + 0.898, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.442, + 0.16, + 0.467 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.449, + 0.29, + 0.462 + ], + "angle": 0, + "content": "Adaptive Max Pooling" + }, + { + "type": "image", + "bbox": [ + 0.328, + 0.443, + 0.359, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.361, + 0.45, + 0.448, + 0.461 + ], + "angle": 0, + "content": "Local Variance" + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.443, + 0.527, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.45, + 0.657, + 0.461 + ], + "angle": 0, + "content": "Channel Concatenate" + }, + { + "type": "image", + "bbox": [ + 0.695, + 0.443, + 0.726, + 0.466 + ], + "angle": 0, + 
"content": null + }, + { + "type": "text", + "bbox": [ + 0.731, + 0.45, + 0.857, + 0.46 + ], + "angle": 0, + "content": "Element-wise Addition" + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.472, + 0.159, + 0.493 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.166, + 0.476, + 0.285, + 0.488 + ], + "angle": 0, + "content": "Nearest Up-sampling" + }, + { + "type": "image", + "bbox": [ + 0.328, + 0.472, + 0.358, + 0.493 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.363, + 0.477, + 0.433, + 0.488 + ], + "angle": 0, + "content": "Chanel Split" + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.472, + 0.527, + 0.493 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.477, + 0.626, + 0.487 + ], + "angle": 0, + "content": "GELU Activation" + }, + { + "type": "image", + "bbox": [ + 0.695, + 0.469, + 0.726, + 0.493 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.731, + 0.477, + 0.856, + 0.487 + ], + "angle": 0, + "content": "Element-wise Product" + }, + { + "type": "image_caption", + "bbox": [ + 0.278, + 0.514, + 0.718, + 0.529 + ], + "angle": 0, + "content": "Figure 29. Team MILA: Network architecture of the proposed MVFMNet." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.555, + 0.206, + 0.57 + ], + "angle": 0, + "content": "4.28. BVIVSR" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.584, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Method Description. Their solution is built on the advances in state-of-the-art single-image super-resolution (SISR) methods [11, 18, 87, 141, 149], particularly the efficient Transformer-based models [52, 139], the continuous super-resolution approaches, such as HiIF [49, 52], and the knowledge distillation strategies [48, 50, 51]. They employ an efficient Transformer-based network architecture, as illustrated in Fig. 
31, where the core component is the Hierarchical Encoding Transformer (HiET) layer. The HiET layer was first proposed in [52] and it is specifically designed to capture rich structural dependencies across various regions of the image, enabling the model to handle complex visual patterns effectively. To enhance the capacity of the model for multi-scale feature representations, each HiET layer is set with different window sizes, allowing it to attend to both local and global contexts. Furthermore, the overall architecture incorporates a modified U-Net structure, where skip connections are introduced between symmetric HiET layers at different depths. This design facilitates efficient multi-level feature fusion and ensures better preservation and reconstruction of fine-grained details" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.556, + 0.907, + 0.631 + ], + "angle": 0, + "content": "in the super-resolved outputs. In addition, they also apply the multi-teacher knowledge distillation strategy [48] to improve the performance of the lightweight C2D-ISR model, where SRFormer [147], MambaIR [32] and EDSR [70] are employed as teacher networks." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.635, + 0.909, + 0.846 + ], + "angle": 0, + "content": "Training Details. They use the DIV2K [102], 1000 2K images from BVI-AOM [82], Flickr2K [70] and 5000 images from LSDIR[64] as training dataset. For evaluation, they follow common practice and employ the DIV2K validation set (containing 100 images) [102]. The maximum learning rate is set to \\(4 \\times 10^{-4}\\). The learning rate follows a cosine annealing schedule, gradually decreasing after an initial warm-up phase of 50 epochs. They use L1 loss and the Adam [54] optimization during training. Training and testing are implemented based on 4 NVIDIA 4090 GPUs. The model comprises 154.8K parameters with an input size of \\(64 \\times 64 \\times 3\\) and it was trained for 1000 epochs with 16 batch sizes per GPU. 
The training of their solution contains five stages:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.856, + 0.909, + 0.901 + ], + "angle": 0, + "content": "- Training the teacher networks, including SRFormer [147], MambaIR [32] and EDSR [70], by using the original settings in their papers;" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.134, + 0.127, + 0.868, + 0.476 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.208, + 0.479, + 0.789, + 0.494 + ], + "angle": 0, + "content": "Figure 30. Team AiMF_SR: Main Figure of Proposed Architecture, Mixture of Efficient Attention." + }, + { + "type": "image", + "bbox": [ + 0.147, + 0.508, + 0.87, + 0.639 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.334, + 0.649, + 0.661, + 0.663 + ], + "angle": 0, + "content": "Figure 31. Team BVIVSR: The structure of the method." 
+ }, + { + "type": "text", + "bbox": [ + 0.091, + 0.678, + 0.483, + 0.721 + ], + "angle": 0, + "content": "- The teacher aggregation of multi-teacher knowledge distillation (MTKD) strategy [48] was adapted to the above teacher networks to obtain an enhanced teacher network;" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.724, + 0.483, + 0.781 + ], + "angle": 0, + "content": "- Training the lightweight C2D-ISR model [52] on continuous scales i.e, from \\(\\times 2\\) to \\(\\times 4\\), to learn the correlation between multiple scales and better recover high-frequency details;" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.784, + 0.483, + 0.827 + ], + "angle": 0, + "content": "- The learned C2D-ISR model was distilled by the MTKD strategy [48] with their enhanced teacher network to obtain the enhanced student model;" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.829, + 0.483, + 0.859 + ], + "angle": 0, + "content": "- Finetuning the enhanced student model by increasing the patch size from \\(64 \\times 64\\) to \\(128 \\times 128\\)." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.678, + 0.483, + 0.859 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.677, + 0.649, + 0.691 + ], + "angle": 0, + "content": "4.29.CUIT_HTT" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.705, + 0.907, + 0.901 + ], + "angle": 0, + "content": "General Method Description. The overall architecture of the proposed method is illustrated in Fig. 32(a), which consists of three main components: the shallow feature extraction module, the deep feature extraction module, and the reconstruction and upsampling module. The shallow feature extraction module employs a BSConv [34] module to extract low-level features such as edges and textures from the input image \\( I^{in} \\in \\mathbb{R}^{3 \\times H \\times W} \\), mapping it to the feature space \\( f^0 \\in \\mathbb{R}^{C \\times H \\times W} \\) for further processing. 
The extracted shallow features are then fed into the deep feature extraction module, which is composed of multiple Frequency-Segmented Attention Blocks (FSABs) designed in this work. The outputs of each FSAB are concatenated" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.507, + 0.937 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.097, + 0.095, + 0.868, + 0.215 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.217, + 0.26, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.263, + 0.219, + 0.452, + 0.44 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.453, + 0.219, + 0.868, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.453, + 0.314, + 0.868, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.453, + 0.908, + 0.496 + ], + "angle": 0, + "content": "Figure 32. Team CUIT_HT: Schematic Diagram of the Method. (a) Overall Architecture of the Model; (b) Frequency-Segmented Attention Block (FSAB); (c) Schematic of the Enhanced Large-kernel Convolution Block (ELCB); (d) Mechanism of Frequency-Segmented Attention (FSA); (e) Frequency Division and Frequency Recombination." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.522, + 0.482, + 0.749 + ], + "angle": 0, + "content": "along the channel dimension and adjusted using a convolutional module group, constituting the deep feature extraction process. As shown in Fig. 32(b), the FSAB structure includes a Concat operation for channel concatenation and a ConvB module group, which consists of a \\(1 \\times 1\\) convolution, a GELU activation function, and a BSCov stacked sequentially. 
Finally, the output of the shallow feature extraction module is added element-wise to the output of the deep feature extraction module via a skip connection and passed to the reconstruction and upsampling module. This module upsamples the feature space information \\(f^{out} \\in \\mathbb{R}^{C \\times H \\times W}\\) and maps it to the high-resolution output image \\(I^{SR} \\in \\mathbb{R}^{3 \\times scale \\times H \\times scale \\times W}\\), where scale is the upscaling factor. In this work, the PixelShuffle method is utilized for upsampling." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.75, + 0.483, + 0.902 + ], + "angle": 0, + "content": "The Frequency-Segmented Attention Block (FSAB) primarily consists of an information distillation architecture for local feature processing and the proposed Frequency-Segmented Attention (FSA) mechanism for global feature processing. The overall architecture of FSA is illustrated in Fig. 32 (d). The input feature map is first transformed into the frequency domain via the Fast Fourier Transform (FFT), enabling global processing in the spatial domain through frequency domain operations. Inspired by windowed attention, the FDivision operation partitions the frequency spec" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.522, + 0.907, + 0.901 + ], + "angle": 0, + "content": "trum into multiple windows, which are concatenated along the channel dimension. A grouped convolution is then applied to process features in different frequency ranges using distinct weights. Subsequently, the FRecombination operation reassembles the segmented frequency windows back into the spectrum. A convolutional layer is applied, and the result is added element-wise to the original spectrum. Finally, the Inverse Fast Fourier Transform (IFFT) is used to convert the processed features back to the spatial domain, and the output is obtained through elementwise multiplication with the original input. 
As for the information distillation architecture, they adopt the structure of the Residual Feature Distillation Block (RFDB) from RFDN [71], as shown in Fig. 32. (b). However, they replace the convolutional layers with Enhanced Large-kernel Convolution Blocks (ELCB). This module employs large-kernel depthwise convolution on half of the channels and pointwise convolution on the full channels, achieving a large receptive field without significantly increasing the number of parameters. Additionally, structural reparameterization is utilized during training, where multiple branches with different receptive fields are employed. During inference, these branches are equivalently replaced with a single large-kernel convolution module, thereby enhancing the model's learning capability without increasing inference cost." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.288 + ], + "angle": 0, + "content": "Train details. They utilize the DIV2K [4] and Flickr2k [101] dataset and the first 10K images from the LSDIR [64] dataset as the training set for their model. During training, the dataset undergoes random horizontal flipping and \\(90^{\\circ}\\) rotation. The mini-batch size and input patch size are set to 64 and \\(64 \\times 64\\), respectively. The model is optimized using the L1 loss function and the Adam optimizer, with an initial learning rate of \\(5 \\times 10^{-3}\\). The learning rate follows a cosine annealing decay schedule over a total of 1000K iterations. Subsequently, the model is fine-tuned using the L2 loss to achieve improved performance. Training is conducted using PyTorch 1.12.1 on a Tesla P100 16G GPU." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.301, + 0.216, + 0.315 + ], + "angle": 0, + "content": "4.30. 
GXZY.AI" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.324, + 0.483, + 0.534 + ], + "angle": 0, + "content": "General Method Description. The GXZY AI team proposed a Parameter-free Vision Mamba, as shown in Fig. 33. The work is inspired by MambaIR [33], SPAN [112] and DVMSR [59], PFVM consists of three parts, shallow feature extraction, deep feature extraction and reconstruction module. Shallow feature extraction is achieved by \\(3 \\times 3\\) convolution, followed by the use of stacked Residue State Space Blocks (RSSBs), which contain the Vision State Space Module (VSSM) to extract deeper features through the capability of Mamba long-range modeling. Then the shallow and deep features are aggregated by a \\(3 \\times 3\\) convolution along with residual concatenation, and finally upsampling is achieved through a sub-pixel convolutional layer to reconstruct the high resolution image." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.537, + 0.483, + 0.672 + ], + "angle": 0, + "content": "As shown in Fig. 34, different from the RSSB used in DVMSR, PFVM does not use stacked ViMM modules, but follows the design paradigm of the RSSB in MambaIR, which differs from MambaIR in that 3-residue branching is used in order to maximize the ability of residual learning. In order to obtain better PSNR with approximate inference time, the convolution layer adopts the bottleneck structure, and the channel attention used in MambaIR is replaced by a parameter-free attention." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.673, + 0.483, + 0.78 + ], + "angle": 0, + "content": "Training Strategy. In the training phase, the GXZY AI team uses the LSDIR [64] dataset for training and the DIV2K [3] validation set for validation. The images in the training set are first cropped with a step size of 240 and a size of 480 to get a series of cropped images. The model was trained on 2 NVIDIA RTX 3090 GPUs. 
The details of the training steps are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.781, + 0.483, + 0.901 + ], + "angle": 0, + "content": "1. The HR images are randomly cropped to size 192, and the dataset is augmented using random flipping and rotation. The model is trained from scratch with a batch size set to 64, using the Adam optimizer with the learning rate set to 0.0001, \\(\\beta_{1} = 0.9\\), \\(\\beta_{2} = 0.99\\), and a Multi-StepLR scheduler with the learning rate halved for every 200,000 iterations for a total of 1,000,000 iterations. The loss function uses L1 loss." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.092, + 0.905, + 0.198 + ], + "angle": 0, + "content": "2. On the basis of the first step, the model with the optimal PSNR on the DIV2K validation set is loaded as the pre-training model, the size of HR image cropping is adjusted to 256, the learning rate is 0.0002, the learning rate is halved for every 100,000 iterations, and the loss function is still used for 1,000,000 iterations with L1 loss." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.209, + 0.603, + 0.224 + ], + "angle": 0, + "content": "4.31. IPCV" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.231, + 0.905, + 0.428 + ], + "angle": 0, + "content": "This team uses HiT-SR: Hierarchical Transformer for Efficient Image Super-Resolution [140] for this challenge. The Hierarchical Transformer for Efficient Image Super-Resolution (HiT-SR) is a deep learning model designed to upscale low-resolution (LR) images into high-resolution (HR) outputs while maintaining efficiency and high-quality reconstruction. Unlike traditional convolutional neural networks (CNNs), which struggle to capture long-range dependencies, HiT-SR employs a hierarchical self-attention mechanism that efficiently processes multiscale image features. This allows the model to integrate local and global information, improving image detail reconstruction while reducing computational costs." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.429, + 0.906, + 0.579 + ], + "angle": 0, + "content": "At the core of the network is a hierarchical feature learning process, where image features are extracted and refined progressively through multiple stages. Instead of applying full-resolution self-attention, which is memory intensive, HiT-SR reduces token complexity using patch merging and downsampling modules, allowing efficient computation without loss of essential information. The model further refines these hierarchical features through multiscale self-attention mechanisms, ensuring that fine-grained details and global structures are effectively captured." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.58, + 0.906, + 0.716 + ], + "angle": 0, + "content": "For the final super-resolution reconstruction, HiT-SR aggregates and progressively upsamples the processed features. This multistage refinement approach ensures that high-frequency details are preserved while preventing artifacts common in naive upsampling techniques. The resulting HR image maintains sharp edges, realistic textures, and minimal distortions. They have used available pre-trained model weights [134] on the low resolution images of the test data set and predicted high resolution images." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.727, + 0.591, + 0.742 + ], + "angle": 0, + "content": "4.32. X-L" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.75, + 0.906, + 0.901 + ], + "angle": 0, + "content": "General Method Description. Their proposed partial permuted self-attention network (PPSA-Net) is shown in Fig. 35. PPSA-Net is inspired by two works: SR-Former [147] and PartialConv [9]. SRFormer is a lightweight super-resolution (SR) approach, but it inevitably still has significant redundancy in feature dimensions. To address this, they combine the strengths of PartialConv to further reduce the complexity and the computational cost. 
Specifically, they use a feature encoder to process the low-resolution image and feed it to four partial per" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.218, + 0.106, + 0.785, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.344, + 0.348, + 0.655, + 0.362 + ], + "angle": 0, + "content": "Figure 33. Team GXZY.AI: The structure of PFVM." + }, + { + "type": "image", + "bbox": [ + 0.216, + 0.377, + 0.784, + 0.617 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.27, + 0.636, + 0.726, + 0.65 + ], + "angle": 0, + "content": "Figure 34. Team GXZY AI: The structural details of MambaIR and DVMSR." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.677, + 0.484, + 0.859 + ], + "angle": 0, + "content": "muted self-attention (PPSA) layers, before finally feeding it into a feature decoder to obtain the final result. In more detail, within each PPSA layer, they use channel split to divide the original features into two sub-features: one comprising \\(1/4\\) of the channels and the other comprising \\(3/4\\) of the channels. The \\(1/4\\) sub-feature is processed by a permuted self-attention block [147], while the \\(3/4\\) sub-feature remains unchanged. After processing, the two sub-features are concatenated back together. This design allows us to efficiently reduce computational overhead while maintaining the model's ability to capture both local and global information, leading to high-quality SR results." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.903 + ], + "angle": 0, + "content": "Training details. They follow the same training procedure as SRFormer [147]. However, they conduct their training" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.677, + 0.747, + 0.692 + ], + "angle": 0, + "content": "using a single NVIDIA 4090 GPU." 
+ }, + { + "type": "title", + "bbox": [ + 0.513, + 0.71, + 0.669, + 0.726 + ], + "angle": 0, + "content": "4.33.Quantum_Res" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.735, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Method Details. In this work, they propose a novel student-teacher framework for super-resolution, as shown in Fig. 36 that enables a lightweight student model to achieve better performance comparable to heavier models. Specifically, to adopt this architecture, they used MambaIRv2-Light [32] as the student model, while MambaIRv2-base [32] serves as the teacher. While they use MambaIRv2-light as an efficiency, their key contribution is demonstrating that a guided student-teacher learning strategy can significantly improve SR performance while keeping model complexity low. [108]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.171, + 0.089, + 0.833, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.32, + 0.241, + 0.678, + 0.256 + ], + "angle": 0, + "content": "Figure 35. Team X-L: Overview of the proposed PPSA-Net." + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.279, + 0.485, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.408, + 0.484, + 0.547 + ], + "angle": 0, + "content": "Figure 36. Team Quantum_Res: The overall pipeline of efficient super-resolution approach, which employs a student-teacher training paradigm. The high-capacity Teacher Network (MambaIRv2-B) learning is transferred to the lightweight Student Network (MambaIRv2-Light) using knowledge distillation. The student network is optimized using L1 loss to ensure accurate superresolution while maintaining efficiency. 
The input low-resolution (LR) database serves as the training input, guiding the student model to achieve high-fidelity reconstruction with reduced computational complexity." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.561, + 0.483, + 0.682 + ], + "angle": 0, + "content": "The student model extracts the initial low-level features from the input low-resolution image using the \\(3 \\times 3\\) convolutional layer. The core of the network comprises a series of Attentive State-Space Blocks (ASSBs) [32] to capture long-range dependencies efficiently. For each block, residual connections are used to facilitate stable gradient propagation. Finally, a pixel-shuffle-based upsampling module reconstructs the final high-resolution image. [32]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.684, + 0.483, + 0.745 + ], + "angle": 0, + "content": "The teacher model, MambaIRv2, follows the same architectural design but with increased depth and wider feature dimensions. This model has significantly more parameters and serves as an upper-bound reference for the student." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.747, + 0.483, + 0.868 + ], + "angle": 0, + "content": "Teacher-Guided Inference. The teacher model remains frozen throughout training and is only used as a qualitative reference to validate architectural choices and improvements. The student model inherits refined architectural principles from the teacher rather than weight transfer or feature alignment. This allows the student to retain its original lightweight nature while benefiting from structural knowledge obtained from a larger-capacity model [108]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Inference Strategy. During inference, an efficient patch-based processing method is applied to handle high-" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.282, + 0.905, + 0.357 + ], + "angle": 0, + "content": "resolution images. 
Given an input image, it is divided into overlapping patches. Each patch is processed independently by the student network, and final predictions are blended using a weighted averaging scheme to ensure seamless reconstruction. [32]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.358, + 0.907, + 0.584 + ], + "angle": 0, + "content": "Training Details. The student model is initialized using pre-trained weights of MambaIRv2-light. The teacher model is loaded with pre-trained weights from a high-performing MambaIRv2-base variant. Fine-tuning was performed on DIV2K and LSDIR, with the number of feature channels set to 48. The training was conducted on patches of size \\(192 \\times 192\\) extracted from high-resolution images, using a batch size of 8. The model is finetuned by minimizing the L1 loss function using the Adam optimizer. The initial learning rate is set to \\(1 \\times 10^{-5}\\) and is reduced when training iterations reach specific milestones, following a Multi-StepLR decay strategy with a factor of 0.5. The total number of iterations is 150K. The teacher model is only used as a reference for guiding architectural refinement and remains frozen throughout the training." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.592, + 0.628, + 0.607 + ], + "angle": 0, + "content": "4.34. SylabSR" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.614, + 0.906, + 0.78 + ], + "angle": 0, + "content": "Method. Inspired by RLFN [56] and VARSR [88], they propose an AutoRegressive Residual Local Feature Network (AR-RLFN) to implement a two-stage super-resolution framework. Specifically, they build a lightweight version of RLFN targeting \\(2 \\times\\) super-resolution, meaning that the final \\(4 \\times\\) SR image is generated from an intermediate \\(2 \\times\\) SR image produced by the same model. The overall framework of AR-RLFN is shown in Fig. 37. 
Although the model needs to be run twice, the \\(2 \\times\\) SR task requires significantly fewer parameters and FLOPs compared to the original one, making the approach efficient overall." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.907, + 0.901 + ], + "angle": 0, + "content": "The modified structure of RLFN is further inspired by R2Net [91]. Benefiting from the two-stage strategy, their model is able to operate with fewer parameters. In their framework, they adopt three Residual Local Feature Blocks (RLFBs) with a reduced number of channels compared to the original version. Additionally, they replace ReLU with LeakyReLU to mitigate gradient vanishing. For reparameterization, they employ the Residual-in-Residual Rep Block" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "35" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.098, + 0.092, + 0.482, + 0.354 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.369, + 0.483, + 0.398 + ], + "angle": 0, + "content": "Figure 37. Team SylabSR: The structure of (up) AR-RLFN, (a) RLFB, (b) RRRB and (c) its reparameterization." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.426, + 0.483, + 0.47 + ], + "angle": 0, + "content": "(RRRB) [26] for improved compression, which reduces the number of parameters during inference by approximately \\(45\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.472, + 0.484, + 0.533 + ], + "angle": 0, + "content": "Training Strategy. They train their network on DIV2K [104] and LSDIR [64] datasets, and augment the training data using random flipping and rotation. The training process is divided into three stages:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.534, + 0.484, + 0.653 + ], + "angle": 0, + "content": "1. HR patches of size \\(512 \\times 512\\) are randomly cropped from the ground truth DIV2K images. 
In this stage, the model performs \\(2 \\times\\) super-resolution. The number of channels in the RRRB is set to 12, and the batch size is set to 32. They use the Adam optimizer to minimize the Charbonnier loss, with the learning rate set to \\(5\\mathrm{e}^{-4}\\). The training runs for 100k iterations, and the learning rate is halved every 20k iterations." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.655, + 0.483, + 0.715 + ], + "angle": 0, + "content": "2. HR patches of size \\( 256 \\times 256 \\) are randomly cropped from the ground truth DIV2K images. The model again performs \\( 2 \\times \\) super-resolution in this stage. The remaining configurations are the same as in Stage 1." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.715, + 0.483, + 0.79 + ], + "angle": 0, + "content": "3. HR patches of size \\(512 \\times 512\\) are randomly cropped from both the DIV2K and LSDIR datasets. In this stage, they use the Adam optimizer to minimize MSE loss, with the learning rate set to \\(2\\mathrm{e}^{-4}\\). The training runs for 50k iterations, and the learning rate is halved every 10k iterations." + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.534, + 0.484, + 0.79 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.802, + 0.208, + 0.817 + ], + "angle": 0, + "content": "4.35. NJUPCA" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.825, + 0.484, + 0.901 + ], + "angle": 0, + "content": "General Method Description. Inspired by SPAN [112], they propose the Spatial Frequency Network (SFNet), which fully leverages both spatial and frequency domain representations. SFNet integrates Frequency Knowledge Miner (FKM) modules after each Spatial Attention Block" + }, + { + "type": "image", + "bbox": [ + 0.526, + 0.092, + 0.897, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.235, + 0.905, + 0.263 + ], + "angle": 0, + "content": "Figure 38. 
Team NJUPCA: The detailed architecture of the designed FKM." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.276, + 0.905, + 0.351 + ], + "angle": 0, + "content": "(SPAB) to capture frequency domain features, complementing the spatial features extracted by SPAB. This parallel design enables the network to effectively learn and combine spatial and frequency domain representations, enhancing the performance of super-resolution reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.352, + 0.906, + 0.472 + ], + "angle": 0, + "content": "As illustrated in Fig. 38, the frequency knowledge miner (FKM) is designed to learn frequency representation from input, which comprises two core components: multi-band frequency learner (MBFL) and full-frequency adjustment learner (FFAL). MBFL aims to enhancing frequency representation by focusing on distinct frequency bands, while FFAL adjusts frequency-domain features from a full-frequency perspective." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.472, + 0.905, + 0.503 + ], + "angle": 0, + "content": "Training Details. They employ two-stage training paradigm:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.505, + 0.905, + 0.595 + ], + "angle": 0, + "content": "- **Stage I - Foundation Training:** Randomly initialized weights are trained on DIV2K and full LSDIR datasets using \\(128 \\times 128\\) HR patches. Configuration: Adam optimizer (\\(\\beta_{1} = 0.9\\), \\(\\beta_{2} = 0.999\\)) with L1 loss, initial learning rate \\(5 \\times 10^{-4}\\) (halved every 200 epochs), batch size 64 over 1,000 epochs (34 hours on \\(4 \\times\\) NVIDIA A6000)." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.596, + 0.905, + 0.656 + ], + "angle": 0, + "content": "- Stage II - Refinement: Initialized with Stage I weights, fine-tuned using DIV2K and LSDIR subset. Configuration: L2 loss with cosine learning schedule (\\(\\eta_{\\mathrm{initial}} = 1 \\times 10^{-4}\\)), 500 epochs." 
+ }, + { + "type": "list", + "bbox": [ + 0.514, + 0.505, + 0.905, + 0.656 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.658, + 0.905, + 0.703 + ], + "angle": 0, + "content": "Other details: Training employed standard data augmentation (random rotation and flipping) without additional regularization techniques." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.713, + 0.638, + 0.729 + ], + "angle": 0, + "content": "4.36. DepthIBN" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.735, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Single Image Super-Resolution (SISR) still faces challenges such as a large number of parameters, high memory consumption, and slow training and inference speed, despite significant advancements. These issues limit the practical use of SISR methods in real-world scenarios. Therefore, recent research has focused on developing lightweight models and optimizing network architectures. Among these techniques, Information Distillation is used to extract important features by splitting channels [43, 45, 67, 71]. One of the main challenges of CNNs is the high computational cost of convolution operations. To reduce this cost," + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "36" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.094, + 0.089, + 0.473, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.297, + 0.483, + 0.325 + ], + "angle": 0, + "content": "Figure 39. Team DepthIBN: Involution and BSConv Multi-Depth Distillation Block (IBMDB)." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.355, + 0.483, + 0.55 + ], + "angle": 0, + "content": "the Depthwise Separable Convolution (DSConv) [40, 135] method was introduced, but due to the separate processing of channels, some information may be lost. 
To address this issue, BSCov optimizes feature processing by utilizing kernel correlations, improving performance and reducing computations [34]. Furthermore, shown in Fig. 39, Involution replaces fixed filters with pixel-dependent dynamic filters, making it more sensitive to spatial variations and better at capturing long-range dependencies between pixels [60]. Involution not only reduces parameters and resource consumption but also provides better performance compared to convolution-based models due to its superior feature extraction capability." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.552, + 0.483, + 0.779 + ], + "angle": 0, + "content": "Method. They used the IBMDN model in this challenge, following previous studies in the field of Lightweight Image Super-Resolution [6]. They propose an Involution and BSConv Multi-Depth Distillation Network (IBMDN), consisting of 6 Involution and BSConv Multi-Depth Distillation Blocks (IBMDB). IBMDB integrates Involution and BSConv to balance computational efficiency and feature extraction. The overall architecture of their proposed model consists of four main sections: shallow feature extraction, deep feature extraction, feature fusion, and reconstruction. A \\(3 \\times 3\\) convolution is used to extract shallow features. Then, through 6 IBMDB blocks, deep features are extracted and fused using a \\(1 \\times 1\\) convolution, followed by refinement through a \\(3 \\times 3\\) convolution. The pixel-shuffle operation is then used as the reconstruction module." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.78, + 0.483, + 0.901 + ], + "angle": 0, + "content": "The Involution and BSConv Multi-Depth Distillation Block (IBMDB) consists of three shallow residual blocks (SRB_IBMD) and one channel contrast attention (CCA) block. 
Based on previous experiments, the use of \\(3 \\times 3\\) convolutions, due to computational complexity and a large number of parameters, is not always the best option, especially for lightweight super-resolution models [5]. In SISR models, a fixed structure for feature extraction blocks is" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.212 + ], + "angle": 0, + "content": "usually used, while features extracted at different depths of the network may differ. This approach may prevent the model from fully exploiting its capacity. Designing blocks with varying structures tailored to the depth of the network can enhance model performance. In their proposed model, the block structure is adjusted based on network depth to achieve an optimal feature extraction combination at different levels." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.213, + 0.907, + 0.425 + ], + "angle": 0, + "content": "BSCnv reduces parameters using intra-kernel correlation, better preserves information, and improves model accuracy without increasing complexity. Involution, with fewer learning parameters, extracts visual features through its attention mechanism and increases efficiency. Therefore, in the Information distillation structure, they consider the block structure differently. At the beginning of the network, BSCnv is dominant in maintaining pixel correlation and local interactions within the block, and with increasing depth, Involution becomes the dominant operator. If BSCnv is denoted by B and Involution by I, the optimal block combination in the deep feature extraction section is as follows: BBB-BBB-BIB-BIB-IBI-IBI. The details of the blocks are shown in the Fig. 39." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.437, + 0.637, + 0.451 + ], + "angle": 0, + "content": "4.37. 
Cidaut AI" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.46, + 0.906, + 0.535 + ], + "angle": 0, + "content": "They propose a lightweight yet effective network with three blocks: an initial Sobel-based block and two ESA-based edge refinement blocks, regulated by a global residual connection. Upscaling is performed via pixel shuffle for efficient super-resolution." + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.554, + 0.903, + 0.751 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.765, + 0.907, + 0.808 + ], + "angle": 0, + "content": "Figure 40. Team Cidaut AI: Fused Edge Attention Network (FEAN) structure. They also show the Sobel Fused Residual Block (SFRB) and the Inverted Residual Bottlenecks (IRB) [86]." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.826, + 0.906, + 0.901 + ], + "angle": 0, + "content": "As shown in Fig. 40, the design integrates two MobileNet Inverted Bottlenecks [86] with channel shuffle and SiLU activation for enhanced information mixing. Inspired by EFDN [117], Sobel-based attention extracts edge features, refined using partial convolutions [84] with minimal" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "37" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.102, + 0.092, + 0.476, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.249, + 0.483, + 0.279 + ], + "angle": 0, + "content": "Figure 41. Team Cidaut AI: Structure of the Enhanced ESA Block (EEB)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.305, + 0.484, + 0.381 + ], + "angle": 0, + "content": "parameter increase. The final attention map, a weighted sum of refined \\( Gx \\), \\( Gy \\), and \\( GxGy \\), undergoes further refinement via partial convolution. A final \\( 1 \\times 1 \\) convolution preserves details while preventing excessive edge processing." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.381, + 0.484, + 0.517 + ], + "angle": 0, + "content": "The proposed ERIB block, an efficient convolutional unit with self-activation, starts with depthwise convolution and \\(1 \\times 1\\) feature expansion [86]. Partial convolutions [84] refine features, while channel shuffle enhances mixing. Inspired by Simple Gate [10], they introduce nonlinearity by reducing channels without increasing parameters. A weighted residual connection with partial convolution ensures effective information propagation, maintaining competitive performance despite PyTorch inefficiencies." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.518, + 0.484, + 0.594 + ], + "angle": 0, + "content": "For the EEB in Fig. 41, they draw inspiration from the ReNRB block [91], replacing reparameterized convolutions with ERIB for improved efficiency. Partial convolutions in the ESA bottleneck and residual connections further exploit feature map redundancy." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.594, + 0.484, + 0.731 + ], + "angle": 0, + "content": "Training Strategy. The training was carried out using the DIV2K, FLICK2R, and LSIDR (30%) datasets to improve the model's generalization ability. As a baseline, the model was trained for 1000 epochs with a cosine annealing learning rate scheduler, a crop size of \\(512 \\times 512\\), and a batch size of 16. Due to instability in the loss during training, an optimal learning rate analysis was performed whenever the loss diverged. This led to the implementation of a learning rate sweep strategy, which was organized into 5 stages." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.741, + 0.17, + 0.756 + ], + "angle": 0, + "content": "4.38.IVL" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.764, + 0.483, + 0.84 + ], + "angle": 0, + "content": "Method. 
Their approach builds upon the strategy used in SPAN [108], last year's winning method, to extract attention maps and integrates it into the proposed baseline architecture, EFDN [116], aiming to enhance feature extraction and structural representation in image processing tasks." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Specifically, as illustrated in Figure 42, this strategy is incorporated within the EDBB blocks of EFDN, which are designed to capture fundamental structural features of an image by applying Sobel and Laplacian filters. These fil" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.906, + 0.168 + ], + "angle": 0, + "content": "ters emphasize edge and texture information, contributing to improved representation learning. During the inference phase, the EDBB blocks are reparametrized into 3x3 convolutions to maintain computational efficiency while preserving learned feature representations." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.18, + 0.906, + 0.286 + ], + "angle": 0, + "content": "The attention maps are derived following the approach implemented in SPAN, leveraging an activation function that is both odd and symmetric to effectively highlight essential regions of the image. These attention maps serve as a direct substitute for the ESA block present in the original EFDN model, aiming to refine feature selection and enhance the model's overall performance." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.299, + 0.906, + 0.359 + ], + "angle": 0, + "content": "As a result of the applied modifications, the final architecture has a lower parameter count and requires fewer floating-point operations compared to the proposed baseline method, EFDN." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.372, + 0.906, + 0.418 + ], + "angle": 0, + "content": "Training Details. 
The training process is structured into three progressive phases to optimize performance and stability:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.43, + 0.907, + 0.581 + ], + "angle": 0, + "content": "- Pre-training: The model undergoes an initial training phase using the DIV2K dataset, incorporating data augmentation techniques such as random rotations, horizontal flipping, and random cropping to generate patches of size \\(64 \\times 64\\). Training is conducted over 30,000 iterations with a batch size of 32, utilizing the Adam optimizer \\((\\beta_{1} = 0.9, \\beta_{2} = 0.999)\\). The learning rate is initially set to 1e-3 for the first 20,000 iterations and subsequently reduced to 1e-4 for the remaining 10,000 iterations. L1 loss is used throughout this phase." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.582, + 0.906, + 0.717 + ], + "angle": 0, + "content": "- First training stage: The model is further refined using the DIV2K_LSDIR dataset, while maintaining the same augmentation strategies as in the pre-training phase. The patch size is increased to \\(256 \\times 256\\), and training is extended to 100,000 iterations with a batch size of 64. The Adam optimizer \\((\\beta_{1} = 0.9, \\beta_{2} = 0.999)\\) is employed, starting with a learning rate of 5e-4, which undergoes a decay by a factor of 0.5 every 20,000 iterations. L1 loss remains the chosen loss function for this stage." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.718, + 0.906, + 0.852 + ], + "angle": 0, + "content": "- Second training stage: In the final phase, training continues on the DIV2K_LSDIR dataset with an expanded patch size of \\(512 \\times 512\\) for an additional 40,000 iterations. The same augmentation methods are retained, and most hyperparameters remain unchanged. However, to ensure stable convergence and fine-tune performance, the learning rate is reduced to 5e-5. 
During this stage, L1 loss is applied for the first 10,000 iterations, after which L2 loss is utilized to enhance final model performance." + }, + { + "type": "list", + "bbox": [ + 0.514, + 0.43, + 0.907, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.853, + 0.906, + 0.899 + ], + "angle": 0, + "content": "All the training phases were performed of the model a single NVIDIA RTX 4070 Super GPU and required approximately 20 hours." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "38" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.09, + 0.089, + 0.912, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.33, + 0.324, + 0.668, + 0.34 + ], + "angle": 0, + "content": "Figure 42. Team IVL: Schematic diagram of the method." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.364, + 0.251, + 0.381 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.389, + 0.484, + 0.48 + ], + "angle": 0, + "content": "This work was partially supported by the Humboldt Foundation, the Ministry of Education and Science of Bulgaria (support for INSAIT, part of the Bulgarian National Roadmap for Research Infrastructure). We thank the NTIRE 2025 sponsors: ByteDance, Meituan, Kuaishou, and University of Wurzburg (Computer Vision Lab)." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.492, + 0.308, + 0.508 + ], + "angle": 0, + "content": "A. 
Teams and Affiliations" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.516, + 0.287, + 0.531 + ], + "angle": 0, + "content": "NTIRE 2025 ESR Teams" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.538, + 0.474, + 0.554 + ], + "angle": 0, + "content": "Title: NTIRE 2025 Efficient Super-Resolution Challenge" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.555, + 0.165, + 0.567 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.568, + 0.305, + 0.583 + ], + "angle": 0, + "content": "Bin Ren\\(^{1,2,4}\\) (bin. ren@unitn.it)," + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.584, + 0.32, + 0.599 + ], + "angle": 0, + "content": "Hang Guo\\(^{3}\\) (cshguo@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.599, + 0.279, + 0.614 + ], + "angle": 0, + "content": "Lei Sun4 (lei.sun@insait.ai)" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.614, + 0.416, + 0.629 + ], + "angle": 0, + "content": "Zongwei Wu5 (zongwei.wu@uni-wuerzburg.de)," + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.629, + 0.413, + 0.644 + ], + "angle": 0, + "content": "Radu Timofte\\(^{5}\\) (radu.timofte@vision.ee.ethz.ch)" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.644, + 0.334, + 0.66 + ], + "angle": 0, + "content": "Yawei \\(\\mathrm{Li^{6}}\\) (li.yawei.ai@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.66, + 0.176, + 0.674 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.675, + 0.262, + 0.689 + ], + "angle": 0, + "content": "1 University of Pisa, Italy" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.69, + 0.278, + 0.704 + ], + "angle": 0, + "content": "\\(^{2}\\) University of Trento, Italy" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.705, + 0.286, + 0.719 + ], + "angle": 0, + "content": "3 Tsinghua University, China" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.719, + 0.482, + 0.735 + ], + "angle": 0, + "content": "4 INSÄIT, 
Sofia University,\"St. Kliment Ohridski\", Bulgaria" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.735, + 0.481, + 0.75 + ], + "angle": 0, + "content": "5 Computer Vision Lab, University of Würzburg, Germany" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.75, + 0.273, + 0.763 + ], + "angle": 0, + "content": "\\(^{6}\\) ETH Zürich, Switzerland" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.675, + 0.482, + 0.763 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.788, + 0.148, + 0.802 + ], + "angle": 0, + "content": "EMSR" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.81, + 0.483, + 0.825 + ], + "angle": 0, + "content": "Title: Distillation-Supervised Convolutional Low-Rank" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.825, + 0.414, + 0.841 + ], + "angle": 0, + "content": "Adaptation for Efficient Image Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.841, + 0.164, + 0.854 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.856, + 0.351, + 0.871 + ], + "angle": 0, + "content": "Yao Zhang \\(^{1}\\) (yao_zhang@sjtu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.871, + 0.377, + 0.886 + ], + "angle": 0, + "content": "Xinning Chai1 (chaixinning@sjtu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.886, + 0.379, + 0.901 + ], + "angle": 0, + "content": "Zhengxue Cheng1 (zxcheng@sjtu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.365, + 0.845, + 0.38 + ], + "angle": 0, + "content": "Yingsheng Qin \\(^{2}\\) (yingsheng.qin@transsion.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.38, + 0.801, + 0.395 + ], + "angle": 0, + "content": "Yucai Yang \\(^{2}\\) (yucai.yang@transsion.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.395, + 0.732, + 0.411 + ], + "angle": 0, + "content": "Li Song \\(^{1}\\) (song_li@sjtu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.411, + 
0.599, + 0.424 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.425, + 0.73, + 0.441 + ], + "angle": 0, + "content": "\\(^{1}\\) Shanghai Jiao Tong University" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.441, + 0.654, + 0.454 + ], + "angle": 0, + "content": "2 Transsion in China" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.425, + 0.73, + 0.454 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.482, + 0.61, + 0.496 + ], + "angle": 0, + "content": "XiaomiMM" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.504, + 0.615, + 0.518 + ], + "angle": 0, + "content": "Title: SPANF" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.52, + 0.586, + 0.533 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.534, + 0.808, + 0.55 + ], + "angle": 0, + "content": "Hongyuan \\(\\mathrm{Yu}^1\\) (yuhyuan1995@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.55, + 0.802, + 0.565 + ], + "angle": 0, + "content": "Pufan \\(\\mathrm{Xu}^2\\) (xpf22@mails.tsinghua.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.565, + 0.778, + 0.58 + ], + "angle": 0, + "content": "Cheng Wan3 (jouiney666@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.58, + 0.823, + 0.595 + ], + "angle": 0, + "content": "Zhijuan Huang1 (huangzhijuan@xiaomi.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.595, + 0.77, + 0.61 + ], + "angle": 0, + "content": "Peng Guo\\(^{4}\\) (guopeng0100@163.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.61, + 0.787, + 0.625 + ], + "angle": 0, + "content": "Shuyuan Cui5 (jouiney666@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.625, + 0.752, + 0.64 + ], + "angle": 0, + "content": "Chenjun Li\\(^{3}\\) (cl2733@cornell.edu)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.64, + 0.781, + 0.656 + ], + "angle": 0, + "content": "Xuehai Hu 
(hsquare@mail.ustc.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.656, + 0.737, + 0.671 + ], + "angle": 0, + "content": "Pan Pan1 (panpan@xiaomi.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.671, + 0.784, + 0.686 + ], + "angle": 0, + "content": "Xin Zhang\\(^{1}\\) (zhangxin14@xiaomi.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.686, + 0.797, + 0.701 + ], + "angle": 0, + "content": "Heng Zhang\\(^{1}\\) (zhangheng8@xiaomi.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.717, + 0.598, + 0.731 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.731, + 0.773, + 0.746 + ], + "angle": 0, + "content": "1 Multimedia Department, Xiaomi Inc." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.746, + 0.86, + 0.762 + ], + "angle": 0, + "content": "\\(^{2}\\) School of Integrated Circuits, Tsinghua University" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.762, + 0.652, + 0.776 + ], + "angle": 0, + "content": "3 Cornell University" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.776, + 0.876, + 0.791 + ], + "angle": 0, + "content": "4 Hanhai Information Technology (Shanghai) Co., Ltd." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.791, + 0.748, + 0.807 + ], + "angle": 0, + "content": "5 Huatai Insurance Group Co., Ltd." 
+ }, + { + "type": "list", + "bbox": [ + 0.515, + 0.731, + 0.876, + 0.807 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.833, + 0.618, + 0.847 + ], + "angle": 0, + "content": "ShannonLab" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.856, + 0.905, + 0.871 + ], + "angle": 0, + "content": "Title: Reparameterization Network for Efficient Image" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.871, + 0.632, + 0.885 + ], + "angle": 0, + "content": "Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.886, + 0.586, + 0.9 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "39" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.091, + 0.319, + 0.167 + ], + "angle": 0, + "content": "Qing Luo\\(^{1}\\) (luoqing.94@qq.com), Linyan Jiang\\(^{1}\\), Haibo Lei\\(^{1}\\), Qifang Gao\\(^{1}\\), Yaqing Li\\(^{1}\\)," + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.183, + 0.176, + 0.212 + ], + "angle": 0, + "content": "Affiliations: \n1Tencent" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.238, + 0.142, + 0.252 + ], + "angle": 0, + "content": "TSSR" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.26, + 0.483, + 0.321 + ], + "angle": 0, + "content": "Title: Light Network for Efficient Image Super-Resolution \nMembers: \nWeihua Luo1 (185471613@qq.com), \nTsing Li1," + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.336, + 0.26, + 0.366 + ], + "angle": 0, + "content": "Affiliations: \n1 Independent researcher" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.392, + 0.14, + 0.407 + ], + "angle": 0, + "content": "mbga" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.413, + 0.451, + 0.535 + ], + "angle": 0, + "content": "Title: Expanded SPAN for Efficient Super-Resolution Members: \nQing Wang\\(^{1}\\) (wangqing.Keen@bytedance.com), \nYi Liu\\(^{1}\\), \nYang Wang\\(^{1}\\), \nHongyu 
An\\(^{1}\\), \nLiou Zhang\\(^{1}\\), \nShijie Zhao\\(^{1}\\)," + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.55, + 0.18, + 0.58 + ], + "angle": 0, + "content": "Affiliations: \n1 ByteDance" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.605, + 0.166, + 0.619 + ], + "angle": 0, + "content": "VPEG_C" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.627, + 0.483, + 0.748 + ], + "angle": 0, + "content": "Title: DAN: Dual Attention Network for lightweight Image Super-Resolution \nMembers: \nLianhong Song1 (songlianhong@njust.edu.cn), \nLong Sun1, \nJinshan Pan1, \nJiangxin Dong1, \nJinhui Tang1" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.764, + 0.414, + 0.794 + ], + "angle": 0, + "content": "Affiliations: \n1Nanjing University of Science and Technology" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.818, + 0.182, + 0.834 + ], + "angle": 0, + "content": "XUPTBoys" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.483, + 0.902 + ], + "angle": 0, + "content": "Title: Frequency-Guided Multi-level Dispersion Network for Efficient Image Super-Resolution \nMembers: Jing Wei1 (freedomwj@126.com)," + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.091, + 0.641, + 0.15 + ], + "angle": 0, + "content": "Mengyang Wang1, Ruilong Guo1, Qian Wang1,2, Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.152, + 0.905, + 0.198 + ], + "angle": 0, + "content": "\\(^{1}\\) Xi'an University of Posts and Telecommunications \\(^{2}\\) National Engineering Laboratory for Cyber Event Warning and Control Technologies" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.219, + 0.604, + 0.234 + ], + "angle": 0, + "content": "HannahSR" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.241, + 0.905, + 0.378 + ], + "angle": 0, + "content": "Title: Multi-level Refinement and Bias-learnable Attention Dual Branch Network for Efficient Image Super-Resolution Members: Qingliang Liu\\(^{1}\\) (liuqingliang1@honor.com), Yang Cheng\\(^{2}\\) 
(obliviate73@outlook.com) Affiliations: \n\\(^{1}\\) Beijing Honor Device Co., Ltd. \n\\(^{2}\\) State Key Laboratory of Integrated Chip & System, Fudan University" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.399, + 0.578, + 0.413 + ], + "angle": 0, + "content": "Davinci" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.421, + 0.744, + 0.513 + ], + "angle": 0, + "content": "Title: PlayerAug \nMembers: \nDavinci (1016994139@qq.com), \nEnxuan Gu1(guexstan@163.com), \nAffiliations: \n1 Dalian University of Technology" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.534, + 0.567, + 0.548 + ], + "angle": 0, + "content": "SRCB" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.557, + 0.822, + 0.648 + ], + "angle": 0, + "content": "Title: SPAN with pruning. \nMembers: \nDafeng Zhang1 (dfeng.zhang@samsung.com), Yang Yong1, \nAffiliations: \n1 Samsung Research China - Beijing (SRC-B)" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.669, + 0.596, + 0.683 + ], + "angle": 0, + "content": "Rochester" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.691, + 0.905, + 0.827 + ], + "angle": 0, + "content": "Title: ESRNet: An enhanced version of SPAN for Efficient Super-Resolution \nMembers: \nPinxin Liu1 (pliu23@ur.rochester.edu), \nYongsheng Yu1 (yyu90@ur.rochester.edu), \nHang Hua1 (hhua2@cs.rochester.edu), \nYunlong Tang1 (yunlong.tang@rochester.edu), \nAffiliations: \n1 University of Rochester" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.849, + 0.561, + 0.863 + ], + "angle": 0, + "content": "IESR" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.871, + 0.833, + 0.9 + ], + "angle": 0, + "content": "Title: Inference Efficient Super-Rosolution Net Members:" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "40" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.091, + 0.388, + 0.167 + ], + "angle": 0, + "content": "Shihao Wang1 (shihao.wsh@antgroup.com), Yukun Yang1, Zhiyu 
Zhang1, Affiliations: \n1 Ant Group" + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.198, + 0.133, + 0.212 + ], + "angle": 0, + "content": "ASR" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.222, + 0.174, + 0.236 + ], + "angle": 0, + "content": "Title: ASR" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.238, + 0.163, + 0.251 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.252, + 0.376, + 0.268 + ], + "angle": 0, + "content": "Yukun Yang\\(^{1}\\) (yukun.yyk@antgroup.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.269, + 0.175, + 0.282 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.282, + 0.143, + 0.296 + ], + "angle": 0, + "content": "1 None" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.328, + 0.167, + 0.343 + ], + "angle": 0, + "content": "VPEG_O" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.352, + 0.483, + 0.383 + ], + "angle": 0, + "content": "Title: SAFMNv3: Simple Feature Modulation Network for Real-Time Image Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.385, + 0.163, + 0.397 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.398, + 0.349, + 0.413 + ], + "angle": 0, + "content": "Long Sun1 (cs.longsun@njust.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.413, + 0.203, + 0.428 + ], + "angle": 0, + "content": "Lianhong Son1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.429, + 0.184, + 0.442 + ], + "angle": 0, + "content": "Jinshan Pan1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.444, + 0.203, + 0.459 + ], + "angle": 0, + "content": "Jiangxin Dong1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.459, + 0.178, + 0.473 + ], + "angle": 0, + "content": "Jinhui Tang" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.475, + 0.175, + 0.488 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": 
[ + 0.094, + 0.488, + 0.412, + 0.504 + ], + "angle": 0, + "content": "1 Nanjing University of Science and Technology" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.535, + 0.15, + 0.549 + ], + "angle": 0, + "content": "mmSR" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.559, + 0.483, + 0.589 + ], + "angle": 0, + "content": "Title: Efficient Feature Aggregation Network for Image Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.59, + 0.163, + 0.603 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.605, + 0.299, + 0.62 + ], + "angle": 0, + "content": "Jiyu \\(\\mathsf{W u}^1\\) (jiyu_wu@163.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.621, + 0.366, + 0.635 + ], + "angle": 0, + "content": "Jiancheng Huang \\(^{1}\\)(jc.huang@siat.ac.cn)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.635, + 0.174, + 0.648 + ], + "angle": 0, + "content": "Yifan Liu1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.649, + 0.174, + 0.664 + ], + "angle": 0, + "content": "Yi Huang \\(^{1}\\)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.664, + 0.199, + 0.68 + ], + "angle": 0, + "content": "Shifeng Chen 1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.681, + 0.176, + 0.695 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.695, + 0.483, + 0.725 + ], + "angle": 0, + "content": "1 Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.756, + 0.162, + 0.77 + ], + "angle": 0, + "content": "ChanSR" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.78, + 0.483, + 0.811 + ], + "angle": 0, + "content": "Title: EECNet: Edge Enhanced Convolutional Network for Efficient Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.812, + 0.163, + 0.824 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.825, + 0.316, 
+ 0.841 + ], + "angle": 0, + "content": "Rui Chen1 (chenr269@163.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.841, + 0.175, + 0.855 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.856, + 0.483, + 0.886 + ], + "angle": 0, + "content": "1 Shenzhen International Graduate School, Tsinghua University, China" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.091, + 0.648, + 0.105 + ], + "angle": 0, + "content": "Pixel Alchemists" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.113, + 0.623, + 0.127 + ], + "angle": 0, + "content": "Title: RCUNet" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.129, + 0.586, + 0.142 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.144, + 0.741, + 0.158 + ], + "angle": 0, + "content": "Yi Feng\\(^{1}\\) (fenyi_work@163.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.159, + 0.596, + 0.173 + ], + "angle": 0, + "content": "Mingxi \\(\\mathrm{Li}^1\\)" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.174, + 0.598, + 0.188 + ], + "angle": 0, + "content": "Cailu Wan1," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.189, + 0.606, + 0.204 + ], + "angle": 0, + "content": "Xiangji \\(\\mathbf{W}\\mathbf{u}^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.22, + 0.598, + 0.233 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.234, + 0.682, + 0.248 + ], + "angle": 0, + "content": "\\(^{1}\\) Independent researcher" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.273, + 0.543, + 0.287 + ], + "angle": 0, + "content": "LZ" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.295, + 0.905, + 0.31 + ], + "angle": 0, + "content": "Title: Tensor decompose efficient super-resolution network" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.311, + 0.586, + 0.324 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 
0.325, + 0.752, + 0.34 + ], + "angle": 0, + "content": "Zibin Liu1 (1451971605@qq.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.341, + 0.786, + 0.356 + ], + "angle": 0, + "content": "Jinyang Zhong\\(^{2}\\) (1439764064@qq.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.371, + 0.598, + 0.384 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.385, + 0.729, + 0.4 + ], + "angle": 0, + "content": "\\(^{1}\\) Southwest Jiaotong University" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.401, + 0.655, + 0.415 + ], + "angle": 0, + "content": "Sichuan University" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.44, + 0.541, + 0.454 + ], + "angle": 0, + "content": "Z6" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.461, + 0.905, + 0.492 + ], + "angle": 0, + "content": "Title: GLoReNet: Global and Local feature Refinement Network for Efficient Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.493, + 0.586, + 0.505 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.507, + 0.783, + 0.522 + ], + "angle": 0, + "content": "Kihwan Yoon\\(^{1}\\) (rlghksdbs@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.523, + 0.67, + 0.538 + ], + "angle": 0, + "content": "Ganzorig Gankhuyag1," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.538, + 0.598, + 0.552 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.552, + 0.837, + 0.567 + ], + "angle": 0, + "content": "\\(^{1}\\) Korea Electronics Technology Institute (KETI)" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.592, + 0.598, + 0.606 + ], + "angle": 0, + "content": "TACO_SR" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.614, + 0.648, + 0.627 + ], + "angle": 0, + "content": "Title: TenInOneSR" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.63, + 0.586, + 0.642 + ], + "angle": 0, + "content": "Members:" + }, + 
{ + "type": "text", + "bbox": [ + 0.515, + 0.644, + 0.875, + 0.659 + ], + "angle": 0, + "content": "Shengyun Zhong\\(^{1}\\) (shengyunzhong2002@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.659, + 0.779, + 0.674 + ], + "angle": 0, + "content": "Mingyang \\(\\mathbf{W u}^{2}\\) (mingyang@tamu.edu)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.674, + 0.718, + 0.688 + ], + "angle": 0, + "content": "Renjie \\(\\mathrm{Li}^2\\) renjie@tamu.edu)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.689, + 0.793, + 0.704 + ], + "angle": 0, + "content": "Yushen Zuo\\(^{3}\\) (zuoyushen12@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.705, + 0.742, + 0.719 + ], + "angle": 0, + "content": "Zhengzhong \\(\\mathrm{Tu}^2\\) (tzz@tamu.edu)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.72, + 0.598, + 0.733 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.734, + 0.726, + 0.749 + ], + "angle": 0, + "content": "1 Northeastern University, USA" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.749, + 0.723, + 0.764 + ], + "angle": 0, + "content": "\\(^{2}\\) Texas A&M University, USA" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.764, + 0.871, + 0.78 + ], + "angle": 0, + "content": "3 The Hong Kong Polytechnic University, Hong Kong" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.734, + 0.871, + 0.78 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.803, + 0.591, + 0.818 + ], + "angle": 0, + "content": "AIOT.AI" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.825, + 0.905, + 0.856 + ], + "angle": 0, + "content": "Title: Efficient channel attention super-resolution network acting on space" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.857, + 0.586, + 0.869 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.871, + 0.787, + 0.886 + ], + "angle": 0, + "content": "Zongang Gao 
\\(1^{1}\\) (gaozongang@qq.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.887, + 0.627, + 0.9 + ], + "angle": 0, + "content": "Guannan Chen1," + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "41" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.092, + 0.091, + 0.175, + 0.105 + ], + "angle": 0, + "content": "Yuan Tian1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.107, + 0.191, + 0.12 + ], + "angle": 0, + "content": "Wenhui Chen" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.123, + 0.175, + 0.136 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.136, + 0.326, + 0.152 + ], + "angle": 0, + "content": "\\(^{1}\\) BOE, AIOT CTO, Beijing, China" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.177, + 0.159, + 0.192 + ], + "angle": 0, + "content": "JNU620" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.199, + 0.483, + 0.229 + ], + "angle": 0, + "content": "Title: Reparameterized Residual Local Feature Network for Efficient Image Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.231, + 0.163, + 0.244 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.245, + 0.397, + 0.26 + ], + "angle": 0, + "content": "Weijun Yuan\\(^{1}\\) (yweijun@stu2022.jnu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.261, + 0.157, + 0.274 + ], + "angle": 0, + "content": "Zhan Li1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.275, + 0.192, + 0.29 + ], + "angle": 0, + "content": "Yihang Chen1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.291, + 0.184, + 0.306 + ], + "angle": 0, + "content": "Yifan Deng1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.306, + 0.191, + 0.321 + ], + "angle": 0, + "content": "Ruting Deng1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.322, + 0.175, + 0.335 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + 
"type": "text", + "bbox": [ + 0.092, + 0.335, + 0.214, + 0.351 + ], + "angle": 0, + "content": "\\(^{1}\\) Jinan University" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.375, + 0.225, + 0.392 + ], + "angle": 0, + "content": "LVGroup_HFUT" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.398, + 0.483, + 0.428 + ], + "angle": 0, + "content": "Title: Swift Parameter-free Attention Network for Efficient Image Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.43, + 0.163, + 0.442 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.444, + 0.318, + 0.459 + ], + "angle": 0, + "content": "Yilin Zhang\\(^{1}\\) (eslzzyl@163.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.459, + 0.395, + 0.474 + ], + "angle": 0, + "content": "Huan Zheng\\(^{2}\\), (huanzheng1998@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.474, + 0.329, + 0.488 + ], + "angle": 0, + "content": "Yanyan Wei1 (weiyy@hfut.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.489, + 0.396, + 0.504 + ], + "angle": 0, + "content": "Wenxuan Zhao\\(^{1}\\) (nightvoyagerr@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.504, + 0.371, + 0.519 + ], + "angle": 0, + "content": "Suiyi Zhao\\(^{1}\\) (meranderzhao@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.519, + 0.327, + 0.535 + ], + "angle": 0, + "content": "Fei Wang1 (jiafei127@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.535, + 0.317, + 0.55 + ], + "angle": 0, + "content": "Kun Li\\(^{1}\\) (kunli.hfut@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.551, + 0.175, + 0.564 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.564, + 0.314, + 0.58 + ], + "angle": 0, + "content": "1 Hefei University of Technology" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.58, + 0.242, + 0.595 + ], + "angle": 0, + "content": "2 University of Macau" + }, 
+ { + "type": "title", + "bbox": [ + 0.092, + 0.62, + 0.123, + 0.634 + ], + "angle": 0, + "content": "YG" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.642, + 0.483, + 0.672 + ], + "angle": 0, + "content": "Title: Spatial-Gate Self-Distillation Network for Efficient Image Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.673, + 0.163, + 0.686 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.687, + 0.347, + 0.703 + ], + "angle": 0, + "content": "Yinggan Tang \\(^{1}\\) (ygtang@ysu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.703, + 0.187, + 0.718 + ], + "angle": 0, + "content": "Mengjie Su 2," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.719, + 0.175, + 0.733 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.733, + 0.456, + 0.748 + ], + "angle": 0, + "content": "\\(^{1}\\) School of Electrical Engineering, Yanshan University" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.773, + 0.294, + 0.789 + ], + "angle": 0, + "content": "MegastudyEdu_Vision.AI" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.795, + 0.483, + 0.826 + ], + "angle": 0, + "content": "Title: Multi-scale Aggregation Attention Network for Efficient Image Super-resolution" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.827, + 0.163, + 0.839 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.84, + 0.384, + 0.856 + ], + "angle": 0, + "content": "Jae-hyeon Lee \\(^{1}\\) (dlwogus147@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.856, + 0.225, + 0.871 + ], + "angle": 0, + "content": "Dong-Hyeop Son1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.871, + 0.188, + 0.885 + ], + "angle": 0, + "content": "Ui-Jin Choi1," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.886, + 0.175, + 0.901 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, 
+ 0.091, + 0.697, + 0.107 + ], + "angle": 0, + "content": "\\(^{1}\\) MegastudyEdu Vision AI" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.132, + 0.568, + 0.146 + ], + "angle": 0, + "content": "MILA" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.154, + 0.905, + 0.185 + ], + "angle": 0, + "content": "Title: Multi-Level Variance Feature Modulation Network for Lightweight Image Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.186, + 0.586, + 0.198 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.199, + 0.865, + 0.215 + ], + "angle": 0, + "content": "Tiancheng Shao1 (shaotiancheng666@outlook.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.215, + 0.622, + 0.23 + ], + "angle": 0, + "content": "Yuqing Zhang2" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.23, + 0.633, + 0.246 + ], + "angle": 0, + "content": "Mengcheng \\(\\mathrm{Ma}^3\\)" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.246, + 0.598, + 0.26 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.26, + 0.742, + 0.276 + ], + "angle": 0, + "content": "1 Anhui University of Technology" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.3, + 0.594, + 0.315 + ], + "angle": 0, + "content": "AiMF_SR" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.323, + 0.905, + 0.353 + ], + "angle": 0, + "content": "Title: Mixture of Efficient Attention for Efficient Image Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.354, + 0.586, + 0.366 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.368, + 0.779, + 0.384 + ], + "angle": 0, + "content": "Donggeun \\(\\mathrm{Ko}^1\\) (sean.ko@aimfuture.ai)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.384, + 0.645, + 0.398 + ], + "angle": 0, + "content": "Youngsang Kwak1," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.399, + 0.586, + 0.412 + ], + "angle": 0, + 
"content": "Jiun Lee1," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.413, + 0.621, + 0.428 + ], + "angle": 0, + "content": "Jaehwa Kwak1," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.429, + 0.598, + 0.444 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.444, + 0.636, + 0.457 + ], + "angle": 0, + "content": "1 AiM Future Inc." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.484, + 0.587, + 0.498 + ], + "angle": 0, + "content": "BVIVSR" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.506, + 0.869, + 0.522 + ], + "angle": 0, + "content": "Title: NTIRE 2025 Efficient SR Challenge Factsheet" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.522, + 0.586, + 0.535 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.536, + 0.813, + 0.552 + ], + "angle": 0, + "content": "Yuxuan Jiang\\(^{1}\\) (yuxuan.jiang@bristol.ac.uk)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.552, + 0.804, + 0.567 + ], + "angle": 0, + "content": "Qiang Zhu\\(^{2,1}\\) (zhuqiang@std.uestc.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.567, + 0.78, + 0.582 + ], + "angle": 0, + "content": "Siyue Teng1 (siyue.teng@bristol.ac.uk)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.582, + 0.779, + 0.597 + ], + "angle": 0, + "content": "Fan Zhang1, (fan.zhang@bristol.ac.uk)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.597, + 0.769, + 0.612 + ], + "angle": 0, + "content": "Shuyuan Zhu2, (eezsy@uestc.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.612, + 0.761, + 0.627 + ], + "angle": 0, + "content": "Bing Zeng\\(^{2}\\), (eezeng@uestc.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.627, + 0.771, + 0.642 + ], + "angle": 0, + "content": "David Bull\\(^{1}\\) (dave.bull@bristol.ac.uk)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.643, + 0.598, + 0.657 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": 
"text", + "bbox": [ + 0.515, + 0.658, + 0.664, + 0.672 + ], + "angle": 0, + "content": "1 University of Bristol" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.672, + 0.905, + 0.688 + ], + "angle": 0, + "content": "\\(^{2}\\) University of Electronic Science and Technology of China" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.658, + 0.905, + 0.688 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.712, + 0.608, + 0.727 + ], + "angle": 0, + "content": "CUIT_HTT" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.735, + 0.905, + 0.765 + ], + "angle": 0, + "content": "Title: Frequency-Segmented Attention Network for Lightweight Image Super" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.766, + 0.586, + 0.778 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.779, + 0.715, + 0.796 + ], + "angle": 0, + "content": "Jing Hu1 (jing_hu@163.com)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.796, + 0.593, + 0.81 + ], + "angle": 0, + "content": "Hui Deng1," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.811, + 0.611, + 0.825 + ], + "angle": 0, + "content": "Xuan Zhang\\(^{1}\\)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.826, + 0.577, + 0.839 + ], + "angle": 0, + "content": "Lin Zhu" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.84, + 0.596, + 0.855 + ], + "angle": 0, + "content": "Qinrui Fan" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.857, + 0.598, + 0.871 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.871, + 0.842, + 0.886 + ], + "angle": 0, + "content": "1 Chengdu University of Information Technology" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "42" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.091, + 0.176, + 0.106 + ], + "angle": 0, + "content": "GXZY.AI" + }, + { + "type": "text", + "bbox": [ + 
0.09, + 0.113, + 0.483, + 0.144 + ], + "angle": 0, + "content": "Title: Parameter Free Vision Mamba For Lightweight Image Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.144, + 0.165, + 0.157 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.158, + 0.35, + 0.174 + ], + "angle": 0, + "content": "Weijian Deng\\(^{1}\\) (348957269@qq.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.174, + 0.334, + 0.189 + ], + "angle": 0, + "content": "Junnan \\(\\mathbf{W u}^{1}\\) (838050895@qq.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.189, + 0.357, + 0.204 + ], + "angle": 0, + "content": "Wenqin Deng\\(^{2}\\) (1601524278@qq.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.204, + 0.336, + 0.219 + ], + "angle": 0, + "content": "Yuquan Liu\\(^{1}\\) (653060432@qq.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.219, + 0.35, + 0.234 + ], + "angle": 0, + "content": "Zhaohong \\(\\mathrm{Xu}^{1}\\) (719357155@qq.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.234, + 0.176, + 0.248 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.248, + 0.483, + 0.277 + ], + "angle": 0, + "content": "1 Guangxi China Tobacco Industry Corporation Limited, China" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.278, + 0.282, + 0.295 + ], + "angle": 0, + "content": "2 Guangxi University, China" + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.248, + 0.483, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.318, + 0.141, + 0.333 + ], + "angle": 0, + "content": "IPCV" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.34, + 0.248, + 0.355 + ], + "angle": 0, + "content": "Title: Efficient HiTSR" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.356, + 0.164, + 0.369 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.37, + 0.403, + 0.386 + 
], + "angle": 0, + "content": "Jameer Babu Pinjari \\(^{1}\\) (jameer.jb@gmail.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.386, + 0.424, + 0.401 + ], + "angle": 0, + "content": "Kuldeep Purohit \\(^{1}\\), (kuldeeppurohit3@gmail.com)" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.401, + 0.176, + 0.416 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.416, + 0.26, + 0.431 + ], + "angle": 0, + "content": "\\(^{1}\\) Independent researcher" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.455, + 0.128, + 0.47 + ], + "angle": 0, + "content": "X-L" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.477, + 0.483, + 0.507 + ], + "angle": 0, + "content": "Title: Partial Permuted Self-Attention for Lightweight Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.508, + 0.164, + 0.521 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.523, + 0.354, + 0.538 + ], + "angle": 0, + "content": "Zeyu Xiao\\(^{1}\\) (zeyuxiao1997@163.com)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.538, + 0.396, + 0.553 + ], + "angle": 0, + "content": "Zhuoyuan Li\\(^{2}\\) (zhuoyuanli@mail.ustc.edu.cn)" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.553, + 0.176, + 0.568 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.568, + 0.324, + 0.583 + ], + "angle": 0, + "content": "\\(^{1}\\) National University of Singapore" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.583, + 0.417, + 0.598 + ], + "angle": 0, + "content": "\\(^{2}\\) University of Science and Technology of China" + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.568, + 0.417, + 0.598 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.623, + 0.205, + 0.639 + ], + "angle": 0, + "content": "Quantum_Res" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.645, + 0.483, + 0.674 + ], + "angle": 0, + 
"content": "Title: Efficient Mamba-Based Image Super-Resolution via Knowledge Distillation" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.676, + 0.164, + 0.688 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.689, + 0.401, + 0.705 + ], + "angle": 0, + "content": "Surya Vashist\\(^{1}\\) (surya.vashisth@s.amity.edu)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.705, + 0.438, + 0.719 + ], + "angle": 0, + "content": "Akshay Dudhane\\(^{2}\\) (akshay.dudhane@mbzuai.ac.ae)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.719, + 0.378, + 0.735 + ], + "angle": 0, + "content": "Praful Hambarde3 (praful@iitmandi.ac.in)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.735, + 0.464, + 0.75 + ], + "angle": 0, + "content": "Sachin Chaudhary\\(^{4}\\) (sachin.chaudhary@ddn.upes.ac.in)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.75, + 0.404, + 0.765 + ], + "angle": 0, + "content": "Satya Naryan Tazi\\(^{5}\\) (satya.tazi@ecajmer.ac.in)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.765, + 0.334, + 0.78 + ], + "angle": 0, + "content": "Prashant Patil\\(^{6}\\) (pwpatil@iitg.ac.in)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.78, + 0.444, + 0.796 + ], + "angle": 0, + "content": "Santosh Kumar Vipparthi7 (skvipparthi@iitrpr.ac.in)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.796, + 0.378, + 0.811 + ], + "angle": 0, + "content": "Subrahmanyam Murala8 (muralas@tcd.ie)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.811, + 0.176, + 0.825 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.825, + 0.314, + 0.84 + ], + "angle": 0, + "content": "1 Amity University Punjab, India" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.84, + 0.482, + 0.87 + ], + "angle": 0, + "content": "\\(^{2}\\) Mohamed Bin Zayed University of Artificial Intelligence, Abu Dhabi" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.871, + 0.394, + 0.886 + 
], + "angle": 0, + "content": "3 Indian Institute of Technology Mandi, India" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.886, + 0.258, + 0.9 + ], + "angle": 0, + "content": "4 UPES Dehradun, India" + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.825, + 0.482, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.091, + 0.837, + 0.107 + ], + "angle": 0, + "content": "\\(^{5}\\) Government Engineering College Ajmer, India" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.107, + 0.837, + 0.121 + ], + "angle": 0, + "content": "\\(^{6}\\) Indian Institute of Technology Guwahati, India" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.122, + 0.813, + 0.137 + ], + "angle": 0, + "content": "\\(^{7}\\) Indian Institute of Technology Ropar, India" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.137, + 0.734, + 0.152 + ], + "angle": 0, + "content": "\\(^{8}\\) Trinity College Dublin, Ireland" + }, + { + "type": "list", + "bbox": [ + 0.514, + 0.091, + 0.837, + 0.152 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.177, + 0.586, + 0.193 + ], + "angle": 0, + "content": "SylabSR" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.199, + 0.884, + 0.214 + ], + "angle": 0, + "content": "Title: AutoRegressive Residual Local Feature Network" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.215, + 0.586, + 0.228 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.229, + 0.801, + 0.244 + ], + "angle": 0, + "content": "Wei-Chen Shen\\(^{1}\\) (r11921a38@ntu.edu.tw)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.244, + 0.641, + 0.259 + ], + "angle": 0, + "content": "I-Hsiang Chen\\(^{1,2}\\)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.26, + 0.598, + 0.274 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.274, + 0.71, + 0.29 + ], + "angle": 0, + "content": "\\(^{1}\\) 
National Taiwan University" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.29, + 0.698, + 0.305 + ], + "angle": 0, + "content": "2 University of Washington" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.274, + 0.71, + 0.305 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.33, + 0.591, + 0.345 + ], + "angle": 0, + "content": "NJUPCA" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.352, + 0.907, + 0.382 + ], + "angle": 0, + "content": "Title: Spatial-Frequency Fusion Model for Efficient Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.383, + 0.586, + 0.397 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.397, + 0.813, + 0.413 + ], + "angle": 0, + "content": "Yunzhe \\(\\mathbf{X}\\mathbf{u}^{1}\\) (221900144@smail.nju.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.413, + 0.603, + 0.427 + ], + "angle": 0, + "content": "Chen Zhao1," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.427, + 0.623, + 0.442 + ], + "angle": 0, + "content": "Zhizhou Chen1," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.443, + 0.598, + 0.458 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.458, + 0.655, + 0.473 + ], + "angle": 0, + "content": "\\(^{1}\\) Nanjing University" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.498, + 0.598, + 0.514 + ], + "angle": 0, + "content": "DepthIBN" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.52, + 0.906, + 0.551 + ], + "angle": 0, + "content": "Title: Involution and BSConv Multi-Depth Distillation Network for Lightweight Image Super-Resolution" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.551, + 0.586, + 0.564 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.565, + 0.882, + 0.595 + ], + "angle": 0, + "content": "Akram Khatami-Rizi \\(^{1}\\) (akramkhatami67@gmail.com), Ahmad Mahmoudi-Aznaveh 
\\(^{1}\\), (a.mahmoudi@sbu.ac.ir" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.596, + 0.598, + 0.61 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.61, + 0.905, + 0.641 + ], + "angle": 0, + "content": "1 Cyberspace Research Institute of Shahid Beheshti University of Iran" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.666, + 0.598, + 0.68 + ], + "angle": 0, + "content": "Cidaut.AI" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.688, + 0.768, + 0.703 + ], + "angle": 0, + "content": "Title: Fused Edge Attention Network" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.704, + 0.586, + 0.716 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.718, + 0.779, + 0.733 + ], + "angle": 0, + "content": "Alejandro Merino1 (alemer@cidaut.es)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.733, + 0.771, + 0.748 + ], + "angle": 0, + "content": "Bruno Longarela1 (brulon@cidaut.es)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.748, + 0.737, + 0.763 + ], + "angle": 0, + "content": "Javier Abad1 (javaba@cidadut.es)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.763, + 0.878, + 0.778 + ], + "angle": 0, + "content": "Marcos V. 
Conde\\(^{2}\\) (marcos.conde@uni-wuerzburg.de)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.779, + 0.598, + 0.793 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.794, + 0.641, + 0.809 + ], + "angle": 0, + "content": "1 Cidaut AI, Spain" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.809, + 0.754, + 0.824 + ], + "angle": 0, + "content": "\\(^{2}\\) University of Würzburg, Germany" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.794, + 0.754, + 0.824 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.848, + 0.551, + 0.863 + ], + "angle": 0, + "content": "IVL" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.871, + 0.618, + 0.884 + ], + "angle": 0, + "content": "Title: PAEDN" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.886, + 0.586, + 0.9 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "43" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.411, + 0.106 + ], + "angle": 0, + "content": "Simone Bianco\\(^{1}\\) (simone.bianco@unimib.com)," + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.107, + 0.35, + 0.121 + ], + "angle": 0, + "content": "Luca Cogo1 (luca.cogo@unimib.com)," + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.122, + 0.454, + 0.137 + ], + "angle": 0, + "content": "Gianmarco Corti1 (g.corti1967@campus.unimib.com)," + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.153, + 0.175, + 0.166 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "ref_text", + "bbox": [ + 0.09, + 0.167, + 0.483, + 0.213 + ], + "angle": 0, + "content": "\\(^{1}\\) Department of Informatics Systems and Communication, University of Milano-Bicocca, Viale Sarca 336, Building U14, Milan, Italy" + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.241, + 0.188, + 0.257 + ], + "angle": 0, + "content": "References" + }, + { + 
"type": "ref_text", + "bbox": [ + 0.108, + 0.266, + 0.483, + 0.349 + ], + "angle": 0, + "content": "[1] Lusine Abrahamyan, Anh Minh Truong, Wilfried Philips, and Nikos Deligiannis. Gradient variance loss for structure-enhanced image super-resolution. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 3219-3223. IEEE, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.351, + 0.483, + 0.419 + ], + "angle": 0, + "content": "[2] Eirikur Agustsson and Radu Timofte. Ntire 2017 challenge on single image super-resolution: Dataset and study. In 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1122-1131, 2017. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.421, + 0.483, + 0.477 + ], + "angle": 0, + "content": "[3] Eirikur Agustsson and Radu Timofte. Ntire 2017 challenge on single image super-resolution: Dataset and study. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2017. 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.478, + 0.483, + 0.547 + ], + "angle": 0, + "content": "[4] Eirikur Agustsson and Radu Timofte. NTIRE 2017 challenge on single image super-resolution: Dataset and study. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 126-135, 2017. 2, 18, 19, 22, 23, 26, 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.549, + 0.483, + 0.604 + ], + "angle": 0, + "content": "[5] Akram Khatami-Rizi Ahmad Mahmoudi-Aznaveh. The role of involution in lightweight super resolution. 2024 13th Iranian/3rd International Machine Vision and Image Processing Conference (MVIP), 2024. 37" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.606, + 0.483, + 0.66 + ], + "angle": 0, + "content": "[6] Akram Khatami-Rizi Ahmad Mahmoudi-Aznaveh. Involution and bsconv multi-depth distillation network for lightweight image super-resolution. 
arXiv preprint arXiv:2503.14779, 2025. 37" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.662, + 0.483, + 0.717 + ], + "angle": 0, + "content": "[7] Sidra Aleem, Julia Dietlmeier, Eric Arazo, and Suzanne Little. Convlora and adabn based domain adaptation via self-training. In 2024 IEEE International Symposium on Biomedical Imaging (ISBI), pages 1-5. IEEE, 2024. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.719, + 0.483, + 0.814 + ], + "angle": 0, + "content": "[8] Jiezhang Cao, Qin Wang, Yongqin Xian, Yawei Li, Bingbing Ni, Zhiming Pi, Kai Zhang, Yulun Zhang, Radu Timofte, and Luc Van Gool. Ciaosr: Continuous implicit attention-in-attention network for arbitrary-scale image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1796–1807, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.817, + 0.483, + 0.872 + ], + "angle": 0, + "content": "[9] Jierun Chen, Shiu-hong Kao, Hao He, Weipeng Zhuo, Song Wen, Chul-Ho Lee, and S-H Gary Chan. Run, don't walk: Chasing higher flops for faster neural networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2023. 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.1, + 0.873, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[10] Liangyu Chen, Xiaojie Chu, Xiangyu Zhang, and Jian Sun. Simple baselines for image restoration, 2022. 38" + }, + { + "type": "list", + "bbox": [ + 0.1, + 0.266, + 0.483, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.093, + 0.906, + 0.176 + ], + "angle": 0, + "content": "[11] Zheng Chen, Zongwei Wu, Eduard Zamfir, Kai Zhang, Yu-lun Zhang, Radu Timofte, Xiaokang Yang, Hongyuan Yu, Cheng Wan, Yuxin Hong, et al. Ntire 2024 challenge on image super-resolution (x4): Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6108-6132, 2024. 
30" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.178, + 0.906, + 0.26 + ], + "angle": 0, + "content": "[12] Zheng Chen, Kai Liu, Jue Gong, Jingkai Wang, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on image super-resolution \\((\\times 4)\\): Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.262, + 0.906, + 0.345 + ], + "angle": 0, + "content": "[13] Zheng Chen, Jingkai Wang, Kai Liu, Jue Gong, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on real-world face restoration: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.347, + 0.906, + 0.389 + ], + "angle": 0, + "content": "[14] Sung-Jin Cho, Seo-Won Ji, Jun-Pyo Hong, Seung-Won Jung, and Sung-Jea Ko. Rethinking coarse-to-fine approach in single image deblurring. In ICCV, 2021. 10, 17, 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.39, + 0.906, + 0.445 + ], + "angle": 0, + "content": "[15] Sung-Jin Cho, Seo-Won Ji, Jun-Pyo Hong, Seung-Won Jung, and Sung-Jea Ko. Rethinking coarse-to-fine approach in single image deblurring. In ICCV, pages 4641-4650, 2021. 18, 25, 26" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.447, + 0.906, + 0.503 + ], + "angle": 0, + "content": "[16] Marcos Conde, Radu Timofte, et al. NTIRE 2025 challenge on raw image restoration and super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.505, + 0.906, + 0.572 + ], + "angle": 0, + "content": "[17] Marcos Conde, Radu Timofte, et al. Raw image reconstruction from RGB on smartphones. NTIRE 2025 challenge report. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.576, + 0.906, + 0.643 + ], + "angle": 0, + "content": "[18] Marcos V Conde, Zhijun Lei, Wen Li, Christos Bampis, Ioannis Katsavounidis, and Radu Timofte. Aim 2024 challenge on efficient video super-resolution for av1 compressed content. arXiv preprint arXiv:2409.17256, 2024. 30" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.646, + 0.908, + 0.716 + ], + "angle": 0, + "content": "[19] Weijian Deng, Hongjie Yuan, Lunhui Deng, and Zengtong Lu. Reparameterized residual feature network for lightweight image super-resolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1712-1721, 2023. 22" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.718, + 0.906, + 0.787 + ], + "angle": 0, + "content": "[20] Xiaohan Ding, Yuchen Guo, Guiguang Ding, and Jungong Han. Acnet: Strengthening the kernel skeletons for powerful cnn via asymmetric convolution blocks. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1911-1920, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.789, + 0.906, + 0.858 + ], + "angle": 0, + "content": "[21] Xiaohan Ding, Xiangyu Zhang, Jungong Han, and Guiguang Ding. Diverse branch block: Building a convolution as an inception-like unit. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10886-10895, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.86, + 0.906, + 0.901 + ], + "angle": 0, + "content": "[22] Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. Repvgg: Making veg-style convnets great again. 
In Proceedings of the IEEE/CVF" + }, + { + "type": "list", + "bbox": [ + 0.523, + 0.093, + 0.908, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "44" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.133, + 0.093, + 0.482, + 0.12 + ], + "angle": 0, + "content": "Conference on Computer Vision and Pattern Recognition, pages 13733-13742, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.122, + 0.483, + 0.164 + ], + "angle": 0, + "content": "[23] Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. Repvgg: Making vgg-style convnets great again. In CVPR, 2021. 9, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.165, + 0.483, + 0.234 + ], + "angle": 0, + "content": "[24] Jie Du, Kai Guan, Yanhong Zhou, Yuanman Li, and Tianfu Wang. Parameter-free similarity-aware attention module for medical image classification and segmentation. IEEE Transactions on Emerging Topics in Computational Intelligence, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.236, + 0.483, + 0.305 + ], + "angle": 0, + "content": "[25] Zongcai Du, Ding Liu, Jie Liu, Jie Tang, Gangshan Wu, and Lean Fu. Fast and memory-efficient network towards efficient image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 853-862, 2022. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.307, + 0.483, + 0.376 + ], + "angle": 0, + "content": "[26] Zongcai Du, Ding Liu, Jie Liu, Jie Tang, Gangshan Wu, and Lean Fu. Fast and memory-efficient network towards efficient image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 853-862, 2022. 36" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.378, + 0.482, + 0.419 + ], + "angle": 0, + "content": "[27] Stefan Elfwing, Eiji Uchibe, and Kenji Doya. 
Sigmoid-weighted linear units for neural network function approximation in reinforcement learning, 2017. 15, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.421, + 0.483, + 0.531 + ], + "angle": 0, + "content": "[28] Egor Ershov, Sergey Korchagin, Alexei Khalin, Artyom Panshin, Arsenyi Terekhin, Ekaterina Zaychenkova, Georgiy Lobarev, Vsevolod Plokhotnyuk, Denis Abramov, Elisey Zhdanov, Sofia Dorogova, Yasin Mamedov, Nikola Banic, Georgii Perevozhikov, Radu Timofte, et al. NTIRE 2025 challenge on night photography rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.533, + 0.483, + 0.615 + ], + "angle": 0, + "content": "[29] Yuqian Fu, Xingyu Qiu, Bin Ren Yanwei Fu, Radu Timofte, Nicu Sebe, Ming-Hsuan Yang, Luc Van Gool, et al. NTIRE 2025 challenge on cross-domain few-shot object detection: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.617, + 0.483, + 0.657 + ], + "angle": 0, + "content": "[30] Albert Gu and Tri Dao. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.661, + 0.483, + 0.728 + ], + "angle": 0, + "content": "[31] Enxuan Gu, Hongwei Ge, and Yong Guo. Code: An explicit content decoupling framework for image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2920-2930, 2024. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.732, + 0.483, + 0.786 + ], + "angle": 0, + "content": "[32] Hang Guo, Yong Guo, Yaohua Zha, Yulun Zhang, Wenbo Li, Tao Dai, Shu-Tao Xia, and Yawei Li. Mambairv2: Attentive state space restoration. arXiv preprint arXiv:2411.15269, 2024. 
6, 30, 34, 35" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.789, + 0.483, + 0.856 + ], + "angle": 0, + "content": "[33] Hang Guo, Jinmin Li, Tao Dai, Zhihao Ouyang, Xudong Ren, and Shu-Tao Xia. Mambair: A simple baseline for image restoration with state-space model. In European Conference on Computer Vision, pages 222-241. Springer, 2024. 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.859, + 0.483, + 0.901 + ], + "angle": 0, + "content": "[34] Daniel Haase and Manuel Amthor. Rethinking depthwise separable convolutions: How intra-kernel correlations lead to improved mobilenets. In Proceedings of the IEEE/CVF" + }, + { + "type": "list", + "bbox": [ + 0.101, + 0.093, + 0.483, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.554, + 0.094, + 0.905, + 0.12 + ], + "angle": 0, + "content": "conference on computer vision and pattern recognition, pages 14600-14609, 2020. 31, 37" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.122, + 0.905, + 0.177 + ], + "angle": 0, + "content": "[35] Kai Han, Yunhe Wang, Qi Tian, Jianyuan Guo, Chunjing Xu, and Chang Xu. Ghostnet: More features from cheap operations. In IEEE Conf. Comput. Vis. Pattern Recog., pages 1580-1589, 2020. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.179, + 0.905, + 0.262 + ], + "angle": 0, + "content": "[36] Shuhao Han, Haotian Fan, Fangyuan Kong, Wenjie Liao, Chunle Guo, Chongyi Li, Radu Timofte, et al. NTIRE 2025 challenge on text to image generation model quality assessment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.264, + 0.905, + 0.331 + ], + "angle": 0, + "content": "[37] Zibin He, Tao Dai, Jian Lu, Yong Jiang, and Shu-Tao Xia. Faked: Feature-affinity based knowledge distillation for efficient image super-resolution. In 2020 IEEE international conference on image processing (ICIP), pages 518-522. 
IEEE, 2020. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.334, + 0.905, + 0.362 + ], + "angle": 0, + "content": "[38] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 25" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.364, + 0.905, + 0.433 + ], + "angle": 0, + "content": "[39] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, et al. Searching for MobileNetV3. In Proceedings of the IEEE International Conference on Computer Vision, pages 1314-1324, 2019. 26" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.435, + 0.905, + 0.504 + ], + "angle": 0, + "content": "[40] Andrew G Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, and Hartwig Adam. Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861, 2017. 37" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.505, + 0.905, + 0.559 + ], + "angle": 0, + "content": "[41] Mu Hu, Junyi Feng, Jiashen Hua, Baisheng Lai, Jianqiang Huang, Xiaojin Gong, and Xian-Sheng Hua. Online convolutional re-parameterization. CoRR, abs/2204.00826, 2022. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.563, + 0.905, + 0.63 + ], + "angle": 0, + "content": "[42] Zhewei Huang, Tianyuan Zhang, Wen Heng, Boxin Shi, and Shuchang Zhou. Real-time intermediate flow estimation for video frame interpolation. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 6, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.633, + 0.905, + 0.701 + ], + "angle": 0, + "content": "[43] Zheng Hui, Xiumei Wang, and Xinbo Gao. Fast and accurate single image super-resolution via information distillation network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 723-731, 2018. 
36" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.704, + 0.905, + 0.771 + ], + "angle": 0, + "content": "[44] Zheng Hui, Xinbo Gao, Yunchu Yang, and Xiumei Wang. Lightweight image super-resolution with information multi-distillation network. In Proceedings of the 27th acm international conference on multimedia, pages 2024-2032, 2019. 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.775, + 0.905, + 0.843 + ], + "angle": 0, + "content": "[45] Zheng Hui, Xinbo Gao, Yunchu Yang, and Xiumei Wang. Lightweight image super-resolution with information multi-distillation network. In Proceedings of the 27th acm international conference on multimedia, pages 2024-2032, 2019. 10, 36" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.846, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[46] Pavel Izmailov, Dmitrii Podoprikhin, Timur Garipov, Dmitry Vetrov, and Andrew Gordon Wilson. Averaging weights leads to wider optima and better generalization. arXiv preprint arXiv:1803.05407, 2018. 23" + }, + { + "type": "list", + "bbox": [ + 0.524, + 0.094, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "45" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.093, + 0.482, + 0.176 + ], + "angle": 0, + "content": "[47] Varun Jain, Zongwei Wu, Quan Zou, Louis Florentin, Henrik Turbell, Sandeep Siddhartha, Radu Timofte, et al. NTIRE 2025 challenge on video quality enhancement for video conferencing: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.178, + 0.482, + 0.233 + ], + "angle": 0, + "content": "[48] Yuxuan Jiang, Chen Feng, Fan Zhang, and David Bull. Mtkd: Multi-teacher knowledge distillation for image super-resolution. In European Conference on Computer Vision, pages 364–382. 
Springer, 2024. 30, 31" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.235, + 0.483, + 0.302 + ], + "angle": 0, + "content": "[49] Yuxuan Jiang, Ho Man Kwan, Tianhao Peng, Ge Gao, Fan Zhang, Xiaqing Zhu, Joel Sole, and David Bull. HIIF: Hierarchical encoding based implicit image function for continuous super-resolution. arXiv preprint arXiv:2412.03748, 2024. 30" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.306, + 0.483, + 0.36 + ], + "angle": 0, + "content": "[50] Yuxuan Jiang, Jakub Nawala, Chen Feng, Fan Zhang, Xiaogjing Zhu, Joel Sole, and David Bull. Rtsr: A real-time super-resolution model for av1 compressed content. arXiv preprint arXiv:2411.13362, 2024. 30" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.362, + 0.482, + 0.417 + ], + "angle": 0, + "content": "[51] Yuxuan Jiang, Jakub Nawala, Fan Zhang, and David Bull. Compressing deep image super-resolution models. In 2024 Picture Coding Symposium (PCS), pages 1-5. IEEE, 2024. 14, 30" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.42, + 0.482, + 0.487 + ], + "angle": 0, + "content": "[52] Yuxuan Jiang, Chengxi Zeng, Siyue Teng, Fan Zhang, Xiaogjing Zhu, Joel Sole, and David Bull. C2D-ISR: Optimizing attention-based image super-resolution from continuous to discrete scales. arXiv preprint arXiv:2503.13740, 2025. 30, 31" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.49, + 0.482, + 0.531 + ], + "angle": 0, + "content": "[53] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.533, + 0.482, + 0.574 + ], + "angle": 0, + "content": "[54] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 8, 14, 18, 28, 30" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.577, + 0.482, + 0.645 + ], + "angle": 0, + "content": "[55] F. 
Kong, Mingxi Li, Songwei Liu, Ding Liu, Jingwen He, Yang Bai, Fangmin Chen, and Lean Fu. Residual local feature network for efficient super-resolution. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 765-775, 2022. 19, 22" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.648, + 0.482, + 0.716 + ], + "angle": 0, + "content": "[56] Fangyuan Kong, Mingxi Li, Songwei Liu, Ding Liu, Jingwen He, Yang Bai, Fangmin Chen, and Lean Fu. Residual local feature network for efficient super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 766-776, 2022. 18, 35" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.719, + 0.482, + 0.772 + ], + "angle": 0, + "content": "[57] Kin Wai Lau, Lai-Man Po, and Yasar Abbas Ur Rehman. Large separable kernel attention: Rethinking the large kernel attention design in cnn. Expert Systems with Applications, 236:121352, 2023. 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.776, + 0.482, + 0.871 + ], + "angle": 0, + "content": "[58] Sangmin Lee, Eunpil Park, Angel Canelo, Hyunhee Park, Youngjo Kim, Hyungju Chun, Xin Jin, Chongyi Li, Chun-Le Guo, Radu Timofte, et al. NTIRE 2025 challenge on efficient burst hdr and restoration: Datasets, methods, and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.874, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[59] Xiaoyan Lei, Wenlong Zhang, and Weifeng Cao. Dvmsr: Distillated vision mamba for efficient super-resolution. 
In" + }, + { + "type": "list", + "bbox": [ + 0.101, + 0.093, + 0.483, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.554, + 0.093, + 0.905, + 0.134 + ], + "angle": 0, + "content": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 6536-6546, 2024. 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.137, + 0.905, + 0.205 + ], + "angle": 0, + "content": "[60] Duo Li, Jie Hu, Changhu Wang, Xiangtai Li, Qi She, Lei Zhu, Tong Zhang, and Qifeng Chen. Involution: Inverting the inheritance of convolution for visual recognition. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021. 37" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.208, + 0.905, + 0.304 + ], + "angle": 0, + "content": "[61] Xin Li, Yeying Jin, Xin Jin, Zongwei Wu, Bingchen Li, Yufei Wang, Wenhan Yang, Yu Li, Zhibo Chen, Bihan Wen, Robby Tan, Radu Timofte, et al. NTIRE 2025 challenge on day and night raindrop removal for dual-focused images: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.307, + 0.905, + 0.403 + ], + "angle": 0, + "content": "[62] Xin Li, Xijun Wang, Bingchen Li, Kun Yuan, Yizhen Shao, Suhang Yao, Ming Sun, Chao Zhou, Radu Timofte, and Zhibo Chen. NTIRE 2025 challenge on short-formUGC video quality assessment and enhancement: Kwaisr dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.406, + 0.905, + 0.503 + ], + "angle": 0, + "content": "[63] Xin Li, Kun Yuan, Bingchen Li, Fengbin Guan, Yizhen Shao, Zihao Yu, Xijun Wang, Yiting Lu, Wei Luo, Suhang Yao, Ming Sun, Chao Zhou, Zhibo Chen, Radu Timofte, et al. 
NTIRE 2025 challenge on short-formUGC video quality assessment and enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.505, + 0.905, + 0.587 + ], + "angle": 0, + "content": "[64] Yawei Li, Kai Zhang, Jingyun Liang, Jiezhang Cao, Ce Liu, Rui Gong, Yulun Zhang, Hao Tang, Yun Liu, Denis Demandolx, et al. Lsdir: A large scale dataset for image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2023. 2, 6, 10, 12, 14, 16, 17, 18, 19, 23, 24, 26, 28, 30, 33, 36" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.591, + 0.905, + 0.659 + ], + "angle": 0, + "content": "[65] Yawei Li, Yulun Zhang, Luc Van Gool, Radu Timofte, et al. NTIRE 2023 challenge on efficient super-resolution: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2023. 15, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.661, + 0.905, + 0.743 + ], + "angle": 0, + "content": "[66] Zheyuan Li, Yingqi Liu, Xiangyu Chen, Haoming Cai, Jinjin Gu, Yu Qiao, and Chao Dong. Blueprint separable residual network for efficient image super-resolution. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 832-842, 2022. 13, 26" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.747, + 0.905, + 0.816 + ], + "angle": 0, + "content": "[67] Zheyuan Li, Yingqi Liu, Xiangyu Chen, Haoming Cai, Jinjin Gu, Yu Qiao, and Chao Dong. Blueprint separable residual network for efficient image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 833-843, 2022. 
10, 36" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.818, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[68] Jie Liang, Radu Timofte, Qiaosi Yi, Zhengqiang Zhang, Shuaizheng Liu, Lingchen Sun, Rongyuan Wu, Xindong Zhang, Hui Zeng, Lei Zhang, et al. NTIRE 2025 the 2nd restore any image model (RAIM) in the wild challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "list", + "bbox": [ + 0.525, + 0.093, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.926, + 0.509, + 0.937 + ], + "angle": 0, + "content": "46" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.092, + 0.486, + 0.161 + ], + "angle": 0, + "content": "[69] Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Kyoung Mu Lee. Enhanced deep residual networks for single image super-resolution. In 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1132-1140, 2017. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.162, + 0.484, + 0.232 + ], + "angle": 0, + "content": "[70] Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Young Mu Lee. Enhanced deep residual networks for single image super-resolution. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 1132-1140, 2017. 12, 17, 26, 28, 30" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.233, + 0.483, + 0.287 + ], + "angle": 0, + "content": "[71] Jie Liu, Jie Tang, and Gangshan Wu. Residual feature distillation network for lightweight image super-resolution. In Proceedings of the European Conference on Computer Vision Workshops, pages 41-55. Springer, 2020. 10, 32, 36" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.288, + 0.483, + 0.357 + ], + "angle": 0, + "content": "[72] Jie Liu, Jie Tang, and Gangshan Wu. 
Residual feature distillation network for lightweight image super-resolution. In Computer Vision-ECCV 2020 Workshops: Glasgow, UK, August 23-28, 2020, Proceedings, Part III 16, pages 41-55. Springer, 2020. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.359, + 0.483, + 0.426 + ], + "angle": 0, + "content": "[73] Jie Liu, Wenjie Zhang, Yuting Tang, Jie Tang, and Gangshan Wu. Residual feature aggregation network for image super-resolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2359-2368, 2020. 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.427, + 0.483, + 0.496 + ], + "angle": 0, + "content": "[74] Xiaohong Liu, Xiongkuo Min, Qiang Hu, Xiaoyun Zhang, Jie Guo, et al. NTIRE 2025 XGC quality assessment challenge: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.497, + 0.483, + 0.58 + ], + "angle": 0, + "content": "[75] Xiaoning Liu, Zongwei Wu, Florin-Alexandru Vasluianu, Hailong Yan, Bin Ren, Yulun Zhang, Shuhang Gu, Le Zhang, Ce Zhu, Radu Timofte, et al. NTIRE 2025 challenge on low light image enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.58, + 0.483, + 0.621 + ], + "angle": 0, + "content": "[76] Zhuang Liu, Mingjie Sun, Tinghui Zhou, Gao Huang, and Trevor Darrell. Rethinking the value of network pruning. In ICLR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.622, + 0.483, + 0.692 + ], + "angle": 0, + "content": "[77] Zhaoyang Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Proceedings of the IEEE/cvf international conference on computer vision. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.692, + 0.483, + 0.719 + ], + "angle": 0, + "content": "[78] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. In *ICLR*, 2017, 17, 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.72, + 0.483, + 0.788 + ], + "angle": 0, + "content": "[79] Qi Ma, Yue Li, Bin Ren, Nicu Sebe, Ender Konukoglu, Theo Gevers, Luc Van Gool, and Danda Pani Paudel. Shapesplat: A large-scale dataset of gaussian splats and their self-supervised pretraining. In International Conference on 3D Vision 2025, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.79, + 0.483, + 0.873 + ], + "angle": 0, + "content": "[80] Yanyu Mao, Nihao Zhang, Qian Wang, Bendu Bai, Wanying Bai, Haonan Fang, Peng Liu, Mingyue Li, and Shengbo Yan. Multi-level dispersion residual network for efficient image super-resolution. In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1660-1669, 2023. 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.873, + 0.483, + 0.902 + ], + "angle": 0, + "content": "[81] Yanyu Mao, Nihao Zhang, Qian Wang, Bendu Bai, Wanying Bai, Haonan Fang, Peng Liu, Mingyue Li, and Shengbo" + }, + { + "type": "list", + "bbox": [ + 0.101, + 0.092, + 0.486, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.553, + 0.093, + 0.906, + 0.148 + ], + "angle": 0, + "content": "Yan. Multi-level dispersion residual network for efficient image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 1660-1669, 2023. 10, 11, 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.15, + 0.905, + 0.219 + ], + "angle": 0, + "content": "[82] Jakub Nawala, Yuxuan Jiang, Fan Zhang, Xiaqing Zhu, Joel Sole, and David Bull. 
Bvi-aom: A new training dataset for deep video compression optimization. In 2024 IEEE International Conference on Visual Communications and Image Processing (VCIP), pages 1-5. IEEE, 2024. 30" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.221, + 0.905, + 0.275 + ], + "angle": 0, + "content": "[83] Ying Nie, Kai Han, Zhenhua Liu, An Xiao, Yiping Deng, Chunjing Xu, and Yunhe Wang. Ghostsr: Learning ghost features for efficient image super-resolution. CoRR, abs/2101.08525, 2021. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.278, + 0.905, + 0.332 + ], + "angle": 0, + "content": "[84] Seung Park, Yoon-Jae Yeo, and Yong-Goo Shin. Pconv: simple yet effective convolutional layer for generative adversarial network. Neural Computing and Applications, 34 (9):7113-7124, 2022. 37, 38" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.334, + 0.905, + 0.389 + ], + "angle": 0, + "content": "[85] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. arXiv preprint arXiv:1912.01703, 2019. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.392, + 0.905, + 0.474 + ], + "angle": 0, + "content": "[86] Danfeng Qin, Chas Leichner, Manolis Delakis, Marco Fornoni, Shixin Luo, Fan Yang, Weijun Wang, Colby Banbury, Chengxi Ye, Berkin Akin, Vaibhav Aggarwal, Tenghui Zhu, Daniele Moro, and Andrew Howard. Mobilenetv4 - universal models for the mobile ecosystem, 2024. 37, 38" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.477, + 0.905, + 0.532 + ], + "angle": 0, + "content": "[87] Yajun Qiu, Qiang Zhu, Shuyuan Zhu, and Bing Zeng. Dual circle contrastive learning-based blind image superresolution. IEEE Transactions on Circuits and Systems for Video Technology, 34(3):1757-1771, 2023. 
30" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.534, + 0.905, + 0.588 + ], + "angle": 0, + "content": "[88] Yunpeng Qu, Kun Yuan, Jinhua Hao, Kai Zhao, Qizhi Xie, Ming Sun, and Chao Zhou. Visual autoregressive modeling for image super-resolution. arXiv preprint arXiv:2501.18993, 2025. 35" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.591, + 0.905, + 0.66 + ], + "angle": 0, + "content": "[89] Bin Ren, Yahui Liu, Yue Song, Wei Bi, Rita Cucchiara, Nicu Sebe, and Wei Wang. Masked jigsaw puzzle: A versatile position embedding for vision transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20382-20391, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.661, + 0.905, + 0.732 + ], + "angle": 0, + "content": "[90] Bin Ren, Yawei Li, Jingyun Liang, Rakesh Ranjan, Mengyuan Liu, Rita Cucchiara, Luc V Gool, Ming-Hsuan Yang, and Nicu Sebe. Sharing key semantics in transformer makes efficient image restoration. Advances in Neural Information Processing Systems, 37:7427-7463, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.733, + 0.905, + 0.829 + ], + "angle": 0, + "content": "[91] Bin Ren, Yawei Li, Nancy Mehta, Radu Timofte, Hongyuan Yu, Cheng Wan, Yuxin Hong, Bingnan Han, Zhuoyuan Wu, Yajun Zou, et al. The ninth nitire 2024 efficient super-resolution challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6595-6631, 2024. 2, 3, 4, 6, 17, 21, 35, 38" + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.831, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[92] Bin Ren, Hang Guo, Lei Sun, Zongwei Wu, Radu Timofte, Yawei Li, et al. The tenth NTIRE 2025 efficient super-resolution challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + }, + { + "type": "list", + "bbox": [ + 0.524, + 0.093, + 0.906, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "47" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.093, + 0.482, + 0.176 + ], + "angle": 0, + "content": "[93] Nickolay Safonov, Alexey Bryntsev, Andrey Moskalenko, Dmitry Kulikov, Dmitriy Vatolin, Radu Timofte, et al. NTIRE 2025 challenge on UGC video enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.177, + 0.482, + 0.26 + ], + "angle": 0, + "content": "[94] Wenzhe Shi, Jose Caballero, Ferenc Huszár, Johannes Totz, Andrew P Aitken, Rob Bishop, Daniel Rueckert, and Zehan Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1874-1883, 2016. 25" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.261, + 0.482, + 0.315 + ], + "angle": 0, + "content": "[95] Long Sun, Jinshan Pan, and Jinhui Tang. Shufflemixer: An efficient convnet for image super-resolution. Advances in Neural Information Processing Systems, 35:17314-17326, 2022. 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.316, + 0.482, + 0.357 + ], + "angle": 0, + "content": "[96] Long Sun, Jiangxin Dong, Jinhui Tang, and Jinshan Pan. Spatially-adaptive feature modulation for efficient image super-resolution. In ICCV, 2023. 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.359, + 0.482, + 0.44 + ], + "angle": 0, + "content": "[97] Lei Sun, Andrea Alfarano, Peiqi Duan, Shaolin Su, Kaiwei Wang, Boxin Shi, Radu Timofte, Danda Pani Paudel, Luc Van Gool, et al. NTIRE 2025 challenge on event-based image deblurring: Methods and results. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.442, + 0.482, + 0.51 + ], + "angle": 0, + "content": "[98] Lei Sun, Hang Guo, Bin Ren, Luc Van Gool, Radu Timofte, Yawei Li, et al. The tenth ntire 2025 image denoising challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.512, + 0.482, + 0.594 + ], + "angle": 0, + "content": "[99] Yunlong Tang, Junjia Guo, Pinxin Liu, Zhiyuan Wang, Hang Hua, Jia-Xing Zhong, Yunzhong Xiao, Chao Huang, Luchuan Song, Susan Liang, Yizhi Song, Liu He, Jing Bi, Mingqian Feng, Xinyang Li, Zeliang Zhang, and Chen-liang Xu. Generative ai for cel-animation: A survey. arXiv preprint arXiv:2501.06250, 2025. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.595, + 0.482, + 0.65 + ], + "angle": 0, + "content": "[100] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In CVPR Workshops, 2017. 10, 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.651, + 0.482, + 0.719 + ], + "angle": 0, + "content": "[101] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 114-125, 2017. 23, 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.721, + 0.482, + 0.775 + ], + "angle": 0, + "content": "[102] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In CVPR workshops, pages 114-125, 2017. 
12, 30" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.777, + 0.482, + 0.831 + ], + "angle": 0, + "content": "[103] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, Lei Zhang, et al. NTIRE 2017 challenge on single image super-resolution: Methods and results. In CVPR Workshops, 2017. 17, 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.832, + 0.482, + 0.899 + ], + "angle": 0, + "content": "[104] Radu Timofte, Eirikur Agustsson, Shuhang Gu, J Wu, A Ignatov, and L Van Gool. Div2k dataset: Diverse 2k resolution high quality images as used for the challenges@ ntire (cvpr 2017 and cvpr 2018) and@ pirm (eccv 2018), 2018. 24, 36" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.093, + 0.482, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.906, + 0.162 + ], + "angle": 0, + "content": "[105] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Caitian Chen, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 image shadow removal challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.164, + 0.907, + 0.233 + ], + "angle": 0, + "content": "[106] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 ambient lighting normalization challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.235, + 0.906, + 0.29 + ], + "angle": 0, + "content": "[107] Pavan Kumar Anasosalu Vasu, James Gabriel, Jeff Zhu, Oncel Tuzel, and Anurag Ranjan. An improved one millisecond mobile backbone. arXiv preprint arXiv:2206.04040, 2022. 
9" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.293, + 0.906, + 0.36 + ], + "angle": 0, + "content": "[108] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient superresolution. arXiv preprint arXiv:2311.12770, 2023. 34, 35, 38" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.363, + 0.906, + 0.445 + ], + "angle": 0, + "content": "[109] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Ya-jun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient super-resolution. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 6246-6256, 2024. 12, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.448, + 0.906, + 0.517 + ], + "angle": 0, + "content": "[110] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient superresolution. In IEEE Conf. Comput. Vis. Pattern Recog. Worksh., 2024. NTIRE 2024 ESR Challenge. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.519, + 0.906, + 0.602 + ], + "angle": 0, + "content": "[111] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6246-6256, 2024. 9, 20" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.604, + 0.906, + 0.688 + ], + "angle": 0, + "content": "[112] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2024. 
7, 8, 14, 20, 21, 23, 24, 26, 33, 36" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.689, + 0.906, + 0.757 + ], + "angle": 0, + "content": "[113] Hang Wang, Xuanhong Chen, Bingbing Ni, Yutian Liu, and Jinfan Liu. Omni aggregation networks for lightweight image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22378-22387, 2023. 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.76, + 0.906, + 0.843 + ], + "angle": 0, + "content": "[114] Hongyuan Wang, Ziyan Wei, Qingting Tang, Shuli Cheng, Liejun Wang, and Yongming Li. Attention guidance distillation network for efficient image super-resolution. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 6287-6296, 2024. 12, 13, 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.845, + 0.906, + 0.901 + ], + "angle": 0, + "content": "[115] Xintao Wang, Liangbin Xie, Ke Yu, Kelvin C.K. Chan, Chen Change Loy, and Chao Dong. BasicSR: Open source image and video restoration toolbox. https://github.com/XPixelGroup/BasicSR, 2022.29" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.907, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "48" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.093, + 0.486, + 0.148 + ], + "angle": 0, + "content": "[116] Yan Wang. Edge-enhanced feature distillation network for efficient super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 777-785, 2022. 2, 3, 4, 18, 38" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.148, + 0.486, + 0.175 + ], + "angle": 0, + "content": "[117] Yan Wang. Edge-enhanced feature distillation network for efficient super-resolution, 2022. 
37" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.177, + 0.485, + 0.232 + ], + "angle": 0, + "content": "[118] Yucong Wang and Minjie Cai. A single residual network with eta modules and distillation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1970-1980, 2023. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.233, + 0.484, + 0.302 + ], + "angle": 0, + "content": "[119] Yan Wang, Yusen Li, Gang Wang, and Xiaoguang Liu. Multi-scale attention network for single image superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2024. 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.302, + 0.484, + 0.344 + ], + "angle": 0, + "content": "[120] Yan Wang, Yusen Li, Gang Wang, and Xiaoguang Liu. Pla-nusr: Chasing faster convnet for efficient super-resolution. arXiv preprint arXiv:2409.13435, 2024. 26" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.344, + 0.484, + 0.428 + ], + "angle": 0, + "content": "[121] Yingqian Wang, Zhengyu Liang, Fengyuan Zhang, Lvli Tian, Longguang Wang, Juncheng Li, Jungang Yang, Radu Timofte, Yulan Guo, et al. NTIRE 2025 challenge on light field image super-resolution: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.428, + 0.484, + 0.482 + ], + "angle": 0, + "content": "[122] Gang Wu, Junjun Jiang, Junpeng Jiang, and Xianming Liu. Transforming image super-resolution: A convformer-based efficient approach. IEEE Transactions on Image Processing, 2024. 27, 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.484, + 0.485, + 0.552 + ], + "angle": 0, + "content": "[123] Chengxing Xie, Xiaoming Zhang, Linze Li, Yuqian Fu, Biao Gong, Tianrui Li, and Kai Zhang. Mat: Multi-range attention transformer for efficient image super-resolution. 
IEEE Transactions on Circuits and Systems for Video Technology, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.553, + 0.485, + 0.621 + ], + "angle": 0, + "content": "[124] Xingyu Xie, Pan Zhou, Huan Li, Zhouchen Lin, and Shuicheng Yan. Adan: Adaptive nesterov momentum algorithm for faster optimizing deep models. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 26" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.622, + 0.486, + 0.719 + ], + "angle": 0, + "content": "[125] Kangning Yang, Jie Cai, Ling Ouyang, Florin-Alexandru Vasluianu, Radu Timofte, Jiaming Ding, Huiming Sun, Lan Fu, Jinlong Li, Chiu Man Ho, Zibo Meng, et al. NTIRE 2025 challenge on single image reflection removal in the wild: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.72, + 0.485, + 0.775 + ], + "angle": 0, + "content": "[126] Lingxiao Yang, Ru-Yuan Zhang, Lida Li, and Xiaohua Xie. Simam: A simple, parameter-free attention module for convolutional neural networks. In International conference on machine learning, pages 11863-11874. PMLR, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.776, + 0.485, + 0.859 + ], + "angle": 0, + "content": "[127] Kihwan Yoon, Ganzorig Gankhuyag, Jinman Park, Haengseon Son, and Kyoungwon Min. Casr: Efficient cascade network structure with channel aligned method for 4k real-time single image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7911-7920, 2024. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.86, + 0.485, + 0.901 + ], + "angle": 0, + "content": "[128] Lei Yu, Xinpeng Li, Youwei Li, Ting Jiang, Qi Wu, Haoqiang Fan, and Shuaicheng Liu. Dipnet: Efficiency distillation and iterative pruning for image super-resolution. 
In" + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.093, + 0.486, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.554, + 0.093, + 0.907, + 0.133 + ], + "angle": 0, + "content": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1692-1701, 2023. 15, 16" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.136, + 0.907, + 0.204 + ], + "angle": 0, + "content": "[129] Xiyu Yu, Tongliang Liu, Xinchao Wang, and Dacheng Tao. On compressing deep models by low rank and sparse decomposition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7370-7379, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.206, + 0.907, + 0.302 + ], + "angle": 0, + "content": "[130] Pierluigi Zama Ramirez, Fabio Tosi, Luigi Di Stefano, Radu Timofte, Alex Costanzino, Matteo Poggi, Samuele Salti, Stefano Mattoccia, et al. NTIRE 2025 challenge on hr depth from images of specular and transparent surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.304, + 0.906, + 0.36 + ], + "angle": 0, + "content": "[131] Eduard Zamfir, Zongwei Wu, Nancy Mehta, Yulun Zhang, and Radu Timofte. See more details: Efficient image superresolution by experts mining. In *Forty-first International Conference on Machine Learning*, 2024. 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.361, + 0.906, + 0.416 + ], + "angle": 0, + "content": "[132] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, and Ming-Hsuan Yang. Restormer: Efficient transformer for high-resolution image restoration. In CVPR, 2022. 10, 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.418, + 0.906, + 0.473 + ], + "angle": 0, + "content": "[133] Dafeng Zhang, Feiyu Huang, Shizhuo Liu, Xiaobing Wang, and Zhezhu Jin. 
Swinfir: Revisiting the swinir with fast fourier convolution and improved training for image super-resolution, 2022. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.475, + 0.906, + 0.517 + ], + "angle": 0, + "content": "[134] Xiang Zhang. Hit-sr: Hierarchical transformer for efficient image super-resolution. https://github.com/XiangZ-0/HiT-SR, 2024. GitHub repository. 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.518, + 0.906, + 0.574 + ], + "angle": 0, + "content": "[135] Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, and Jian Sun. Shufflenet: An extremely efficient convolutional neural network for mobile devices. Proceedings of the IEEE conference on computer vision and pattern recognition, 2018. 37" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.575, + 0.906, + 0.644 + ], + "angle": 0, + "content": "[136] Xindong Zhang, Hui Zeng, and Lei Zhang. Edge-oriented convolution block for real-time super resolution on mobile devices. In MM '21: ACM Multimedia Conference, Virtual Event, China, October 20 - 24, 2021, pages 4034-4043. ACM, 2021. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.645, + 0.906, + 0.701 + ], + "angle": 0, + "content": "[137] Xindong Zhang, Huiyu Zeng, and Lei Zhang. Edge-oriented convolution block for real-time super resolution on mobile devices. Proceedings of the 29th ACM International Conference on Multimedia, 2021. 19" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.702, + 0.906, + 0.758 + ], + "angle": 0, + "content": "[138] Xindong Zhang, Hui Zeng, and Lei Zhang. Edge-oriented convolution block for real-time super resolution on mobile devices. In Proceedings of the 29th ACM International Conference on Multimedia, pages 4034-4043, 2021. 3, 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.759, + 0.906, + 0.815 + ], + "angle": 0, + "content": "[139] Xiang Zhang, Yulun Zhang, and Fisher Yu. Hit-sr: Hierarchical transformer for efficient image super-resolution. 
In European Conference on Computer Vision, pages 483-500. Springer, 2024. 30" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.816, + 0.906, + 0.858 + ], + "angle": 0, + "content": "[140] Xiang Zhang, Yulun Zhang, and Fisher Yu. Hit-sr: Hierarchical transformer for efficient image super-resolution. arXiv preprint, arXiv:2407.05878, 2024. 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.859, + 0.907, + 0.901 + ], + "angle": 0, + "content": "[141] Yulun Zhang, Kai Zhang, Zheng Chen, Yawei Li, Radu Timofte, et al. NTIRE 2023 challenge on image superresolution (x4): Methods and results. In Proceedings of" + }, + { + "type": "list", + "bbox": [ + 0.516, + 0.093, + 0.907, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "49" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.133, + 0.093, + 0.482, + 0.12 + ], + "angle": 0, + "content": "the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2023. 30" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.122, + 0.483, + 0.178 + ], + "angle": 0, + "content": "[142] Hengyuan Zhao, Xiangtao Kong, Jingwen He, Yu Qiao, and Chao Dong. Efficient image super-resolution using pixel attention. In European Conference on Computer Vision, pages 56-72. Springer, 2020. 26" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.179, + 0.482, + 0.261 + ], + "angle": 0, + "content": "[143] Mengyi Zhao, Mengyuan Liu, Bin Ren, Shuling Dai, and Nicu Sebe. Denoising diffusion probabilistic models for action-conditioned 3d motion generation. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 4225-4229. IEEE, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.263, + 0.482, + 0.317 + ], + "angle": 0, + "content": "[144] Mingjun Zheng, Long Sun, Jiangxin Dong, and Jinshan Pan. 
Smfanet: A lightweight self-modulation feature aggregation network for efficient image super-resolution. In ECCV, 2024. 10, 17, 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.32, + 0.482, + 0.389 + ], + "angle": 0, + "content": "[145] Mingjun Zheng, Long Sun, Jiangxin Dong, and Jinshan Pan. Smfanet: A lightweight self-modulation feature aggregation network for efficient image super-resolution. In European Conference on Computer Vision, pages 359-375. Springer, 2024. 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.39, + 0.482, + 0.444 + ], + "angle": 0, + "content": "[146] Xu Zheng, Yunhao Luo, Pengyuan Zhou, and Lin Wang. Distilling efficient vision transformers from cnns for semantic segmentation. Pattern Recognition, 158:111029, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.446, + 0.482, + 0.515 + ], + "angle": 0, + "content": "[147] Yupeng Zhou, Zhen Li, Chun-Le Guo, Song Bai, Ming-Ming Cheng, and Qibin Hou. Srformer: Permuted self-attention for single image super-resolution. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12780–12791, 2023. 30, 33, 34" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.517, + 0.482, + 0.585 + ], + "angle": 0, + "content": "[148] Lianghui Zhu, Bencheng Liao, Qian Zhang, Xinlong Wang, Wenyu Liu, and Xinggang Wang. Vision mamba: Efficient visual representation learning with bidirectional state space model. In *Forty-first International Conference on Machine Learning*, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.587, + 0.482, + 0.655 + ], + "angle": 0, + "content": "[149] Qiang Zhu, Pengfei Li, and Qianhui Li. Attention retractable frequency fusion transformer for image super resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1756-1763, 2023. 
30" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.093, + 0.483, + 0.655 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "50" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10686/02e14e26-d981-43b7-bd68-0bb6d5c44d72_origin.pdf b/data/2025/2504_10xxx/2504.10686/02e14e26-d981-43b7-bd68-0bb6d5c44d72_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0b1022d4a7de2db3f756f1c04eccc7abe9e954e8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/02e14e26-d981-43b7-bd68-0bb6d5c44d72_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b0bc4b4ae16d275d59143ab7612b2fa866aa3257d1af87d9c7f0cecf33749bd +size 10359035 diff --git a/data/2025/2504_10xxx/2504.10686/full.md b/data/2025/2504_10xxx/2504.10686/full.md new file mode 100644 index 0000000000000000000000000000000000000000..7f19a75651cf87d8cdefb85494c717495100903c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/full.md @@ -0,0 +1,1890 @@ +# The Tenth NTIRE 2025 Efficient Super-Resolution Challenge Report + +
Bin Ren*Hang Guo*Lei Sun*Zongwei Wu*Radu Timofte*Yawei Li*
Yao ZhangXinning ChaiZhengxue ChengYingsheng QinYucai Yang
Li SongHongyuan YuPufan XuCheng WanZhijuan HuangPeng Guo
Shuyuan CuiChenjun LiXuehai HuPan PanXin ZhangHeng Zhang
Qing LuoLinyan JiangHaibo LeiQifang GaoYaqing LiWeihua Luo
Tsing LiQing WangYi LiuYang WangHongyu AnLiou Zhang
Shijie ZhaoLianhong SongLong SunJinshan PanJiangxin DongJinhui Tang
Jing WeiMengyang WangRuilong GuoQian WangQingliang Liu
Yang ChengDavinciEnxuan GuPinxin LiuYongsheng YuHang Hua
Yunlong TangShihao WangYukun YangZhiyu ZhangYukun YangJiyu Wu
Jiancheng HuangYifan LiuYi HuangShifeng ChenRui ChenYi Feng
Mingxi LiCailu WanXiangji WuZibin LiuJinyang ZhongKihwan Yoon
Ganzorig GankhuyagShengyun ZhongMingyang WuRenjie LiYushen Zuo
Zhengzhong TuZongang GaoGuannan ChenYuan TianWenhui Chen
Weijun YuanZhan LiYihang ChenYifan DengRuting DengYilin Zhang
Huan ZhengYanyan WeiWenxuan ZhaoSuiyi ZhaoFei WangKun Li
Yinggan TangMengjie SuJae-hyeon LeeDong-Hyeop SonUi-Jin Choi
Tiancheng ShaoYuqing ZhangMengcheng MaDonggeun KoYoungsang Kwak
Jiun LeeJaehwa KwakYuxuan JiangQiang ZhuSiyue TengFan Zhang
Shuyuan ZhuBing ZengDavid BullJing HuHui DengXuan Zhang
Lin ZhuQinrui FanWeijian DengJunnan WuWenqin DengYuquan Liu
Zhaohong XuJameer Babu PinjariKuldeep PurohitZeyu XiaoZhuoyuan Li
Surya VashisthAkshay DudhanePraful HambardeSachin Chaudhary
Satya Naryan TaziPrashant PatilSantosh Kumar VipparthiSubrahmanyam Murala
Wei-Chen ShenI-Hsiang ChenYunzhe XuChen ZhaoZhizhou Chen
Akram Khatami-RiziAhmad Mahmoudi-AznavehAlejandro MerinoBruno Longarela
Javier AbadMarcos V. CondeSimone BiancoLuca CogoGianmarco Corti
+ +# Abstract + +This paper presents a comprehensive review of the NTIRE 2025 Challenge on Single-Image Efficient Super-Resolution + +(ESR). The challenge aimed to advance the development of deep models that optimize key computational metrics, i.e., runtime, parameters, and FLOPs, while achieving a PSNR of at least 26.90 dB on the DIV2K_LSDIR_valid dataset and 26.99 dB on the DIV2K_LSDIR_test dataset. A robust participation saw 244 registered entrants, with 43 teams submitting valid entries. This report meticulously analyzes these methods and results, emphasizing groundbreaking advancements in state-of-the-art single-image ESR techniques. The analysis highlights innovative approaches and establishes benchmarks for future research in the field. + +# 1. Introduction + +Single image super-resolution (SR) is designed to reconstruct a high-resolution (HR) image from a single low-resolution (LR) image, typically affected by blurring and down-sampling. The standard degradation model in traditional SR, bicubic down-sampling, allows for consistent benchmarks and systematic comparisons among different SR methods. This framework also serves as a platform to highlight the advances in SR technologies. SR techniques are widely used in fields such as satellite imaging, medical image enhancement, and surveillance, where improved image quality is essential for accurate interpretation and analysis. + +State-of-the-art deep neural networks for image superresolution (SR) often suffer from overparameterization, intensive computation, and high latency, making their deployment on mobile devices for real-time SR applications challenging. To address these limitations, extensive research has focused on improving network efficiency through techniques such as network pruning, low-rank filter decomposition, network quantization, neural architecture search, state space modeling, diffusion priors, and knowledge distillation [76, 79, 89, 90, 129, 143, 146, 148]. 
These compression methods, successfully applied to image SR, optimize both the computational footprint and the operational speed [8, 91, 123]. + +Efficient SR is particularly crucial for edge computing and mobile devices, where processing power, energy availability, and memory are limited. The enhanced efficiency of SR models ensures that these devices can execute high-quality image processing in real-time without exhausting system resources or draining battery life rapidly. Metrics like runtime, parameter count, and computational complexity (FLOPs) are vital for assessing the suitability of SR models for edge deployment. These parameters are key in maintaining a balance between performance and resource use, ensuring that mobile devices can deliver advanced imaging capabilities efficiently. This balance is critical for the widespread adoption of advanced SR techniques in everyday applications, driving the development of AI-enabled technologies that are both powerful and accessible. + +In collaboration with the 2025 New Trends in Image Restoration and Enhancement (NTIRE 2025) workshop, we organize the challenge on single-image efficient superresolution. The challenge's goal is to super-resolve an LR image with a magnification factor of $\times 4$ using a network that reduces aspects such as runtime, parameters, FLOPs, of EFDN [116], while at least maintaining the $26.90~\mathrm{dB}$ on the DIV2K_LSDIR_valid dataset, and $26.99\mathrm{dB}$ on the DIV2K_LSDIR_test dataset. This challenge aims to discover advanced and innovative solutions for efficient SR, benchmark their efficiency, and identify general trends for the design of future efficient SR networks. 
+ +This challenge is one of the NTIRE 2025 Workshop associated challenges on: ambient lighting normalization [106], reflection removal in the wild [125], shadow removal [105], event-based image deblurring [97], image denoising [98], XGC quality assessment [74], UGC video enhancement [93], night photography rendering [28], image super-resolution (x4) [12], real-world face restoration [13], efficient super-resolution [92], HR depth estimation [130], efficient burst HDR and restoration [58], cross-domain few-shot object detection [29], short-form UGC video quality assessment and enhancement [62, 63], text to image generation model quality assessment [36], day and night rain-drop removal for dual-focused images [61], video quality assessment for video conferencing [47], low light image enhancement [75], light field super-resolution [121], restore any image model (RAIM) in the wild [68], raw restoration and super-resolution [16] and raw reconstruction from RGB on smartphones [17]. + +# 2. NTIRE 2025 Efficient Super-Resolution Challenge + +The goals of this challenge include: (i) promoting research in the area of single-imae efficient super-resolution, (ii) facilitating comparisons between the efficiency of various methods, and (iii) providing a platform for academic and industrial participants to engage, discuss, and potentially establish collaborations. This section delves into the specifics of the challenge. + +# 2.1. Dataset + +The DIV2K [4] dataset and LSDIR [64] dataset are utilized for this challenge. The DIV2K dataset consists of 1,000 diverse 2K resolution RGB images, which are split into a training set of 800 images, a validation set of 100 images, and a test set of 100 images. The LSDIR dataset contains 86,991 high-resolution high-quality images, which are split into a training set of 84,991 images, a validation set of 1,000 images, and a test set of 1,000 images. 
In this challenge, the corresponding LR DIV2K images are generated by bicubic downsampling with a down-scaling factor of $4 \times$ . The training images from DIV2K and LSDIR are provided to the participants of the challenge. During the validation phase, 100 images from the DIV2K validation set and 100 images from the LSDIR validation set are made available to participants. During the test phase, 100 images from the DIV2K test set and another 100 images from the LSDIR test set are used. Throughout the entire challenge, the testing HR images remain hidden from the participants. + +# 2.2. EFDN Baseline Model + +The Edge-Enhanced Feature Distillation Network (EFDN) [116] serves as the baseline model in this challenge. The aim is to improve its efficiency in terms of runtime, number of parameters, and FLOPs, while at least maintaining 26.90 dB on the DIV2K_LSDIR_valid dataset and 26.99 dB on the DIV2K_LSDIR_test dataset. + +The main idea within EFDN is a combination of block composing, architecture searching, and loss designing to obtain a trade-off between performance and lightweighting. Especially, For block composing, EFDN sum up the re-parameterization methods [20, 21, 138] and designs a more effective and complex edge-enhanced diverse branch block. In detail, they employ several reasonable reparameterizable branches to enhance the structural information extraction, and then they integrate them into a vanilla convolution to maintain the inference performance. To ensure the effective optimization of parallel branches in EDBB, they designed an edge-enhanced gradient-variance loss (EG) based on the gradient-variance loss [1]. The proposed loss enforces minimizing the difference between the computed variance maps, which is helpful to restore sharper edges. The gradient maps calculated by different filters and the corresponding EG loss. In addition, the NAS strategy of DLSR is adopted to search for a robust backbone. 
+ +The baseline EFDN emerges as the 1st place for the overall performance of the NTIRE2023 Efficient SR Challenge [116]. The quantitative performance and efficiency metrics of EFDN are summarized as follows: (1) The number of parameters is $0.276\mathrm{M}$ . (2) The average PSNRs on validation (DIV2K 100 valid images and LSDIR 100 valid images) and testing (DIV2K 100 test images and LSDIR 100 test images) sets of this challenge are 26.93 dB and 27.01 dB, respectively. (3) The runtime averaged to 22.18ms on the validation and test set with PyTorch $2.0.0 + \mathrm{cu}118$ , and a single NVIDIA RTX A6000 GPU. (4) The number of FLOPs for an input of size $256\times 256$ is $16.70\mathrm{G}$ . + +# 2.3. Tracks and Competition + +The aim of this challenge is to devise a network that reduces one or several aspects such as runtime, parameters, and FLOPs, while at least maintaining the 26.90 dB on the DIV2K_LSDIR valid dataset, and 26.99 dB on the DIV2K_LSDIR test dataset. + +Challenge phases: (1) Development and validation phase: Participants were given access to 800 LR/HR training image pairs and 200 LR/HR validation image pairs from the DIV2K and the LSDIR datasets. An additional 84,991 LR/HR training image pairs from the LSDIR dataset are also provided to the participants. The EFDN model, pretrained parameters, and validation demo script are available + +on GitHub https://github.com/Amazingren/NTIRE2025_ESR, allowing participants to benchmark their models' runtime on their systems. Participants could upload their HR validation results to the evaluation server to calculate the PSNR of the super-resolved image produced by their models and receive immediate feedback. The corresponding number of parameters, FLOPs, and runtime will be computed by the participants. (2) Testing phase: In the final test phase, participants were granted access to 100 LR testing images from DIV2K and 100 LR testing images from LSDIR, while the HR ground-truth images remained hidden. 
Participants submitted their super-resolved results to the Codalab evaluation server and emailed the code and factsheet to the organizers. The organizers verified and ran the provided code to obtain the final results, which were then shared with participants at the end of the challenge. + +Evaluation protocol: Quantitative evaluation metrics included validation and testing PSNRs, runtime, FLOPs, and the number of parameters during inference. PSNR was measured by discarding a 4-pixel boundary around the images. The average runtime during inference was computed on the 200 LR validation images and the 200 LR testing images. The average runtime on the validation and testing sets served as the final runtime indicator. FLOPs are evaluated on an input image of size $256 \times 256$ . Among these metrics, runtime was considered the most important. Participants were required to maintain a PSNR of at least 26.90 dB on the DIV2K_LSDIR valid dataset, and 26.99 dB on the DIV2K_LSDIR test dataset during the challenge. The constraint on the testing set helped prevent overfitting on the validation set. It's important to highlight that methods with a PSNR below the specified threshold (i.e., 26.90 dB on DIV2K_LSDIR_valid and, 26.99 dB on DIV2K_LSDIR_test) will not be considered for the subsequent ranking process. It is essential to meet the minimum PSNR requirement to be eligible for further evaluation and ranking. A code example for calculating these metrics is available at https://github.com/Amazingren/NTIRE2025_ESR. + +To better quantify the rankings, we followed the scoring function from NTIRE2024 ESR [91] for three evaluation metrics in this challenge: runtime, FLOPs, and parameters. This scoring aims to convert the performance of each metric into corresponding scores to make the rankings more significant. 
Especially, the score for each separate metric (i.e., Runtime, FLOPs, and parameter) for each sub-track is calculated as: + +$$ +\text {S c o r e} _ {\text {M e t r i c}} = \frac {\operatorname {E x p} (2 \times \operatorname {M e t r i c} _ {\text {T e a m X}})}{\operatorname {M e t r i c} _ {\text {B a s e l i n e}}}, \tag {1} +$$ + +based on the score of each metric, the final score used for + +the main track is calculated as: + +$$ +\begin{array}{l} \text {S c o r e} = w _ {1} \times \text {S c o r e} \\ + w _ {2} \times S c o r e \_ F L O P s \tag {2} \\ + w _ {3} \times S c o r e \_ P a r a m s, \\ \end{array} +$$ + +where $w_{1}, w_{2}$ , and $w_{3}$ are set to 0.7, 0.15, and 0.15, respectively. This setting is intended to incentivize participants to design a method that prioritizes speed efficiency while maintaining a reasonable model complexity. + +# 3. Challenge Results + +The final challenge results and the corresponding rankings are presented in Tab. 1 The table also includes the baseline method EFDN [116] for comparison. In Sec.4, the methods evaluated in Tab. 1 are briefly explained, while the team members are listed in A. The performance of different methods is compared from four different perspectives, including the runtime, FLOPs, the parameters, and the overall performance. Furthermore, in order to promote a fair competition emphasizing efficiency, the criteria for image reconstruction quality in terms of test PSNR are set to 26.90 and 26.99 on the DIV2K_LSDIR_valid and DIV2K_LSDIR_test sets, respectively. + +Runtime. In this challenge, runtime stands as the paramount evaluation metric. ShannonLab's solution emerges as the frontrunner with the shortest runtime among all entries in the efficient SR challenge, securing its top-3 ranking position. Following closely, the TSSR and mbga claim the second and third spots, respectively. 
Remarkably, the average runtime of the top three solutions on both the validation and test sets remains below $10\mathrm{ms}$ . Impressively, the first 13 teams present solutions with an average runtime below $16\mathrm{ms}$ , showcasing a continuous enhancement in the efficiency of image SR networks. Despite the slight differences in runtime among the top three teams, the challenge retains its competitive edge. An additional distinction from previous challenges worth noting is that this year, runtime performance no longer predominantly dictates the overall rankings as it has in the past, where the top three solutions in terms of runtime were also the top performers in the main track (e.g., from NTIRE ESR 2024 [91]). This shift indicates that participants are now emphasizing a more balanced approach, focusing not only on runtime optimization but also on improving the comprehensive performance of their models + +Parameters. Model complexity was further evaluated by considering the number of parameters, as detailed in Table 1. In this sub-track, VEPG_C achieved the top position with only 0.044M parameters, closely followed by HannahSR and XUPTBoys with 0.060M and 0.072M parameters, respectively. The minimal disparity among the top three methods highlights their competitive edge and efficiency in managing model complexity. They were scored + +at 1.38, 1.54, and 1.68, respectively, indicating a tight competition. However, it is noteworthy that these models also exhibited relatively high runtimes, suggesting an area for potential improvement in future iterations. + +FLOPs. The number of floating-point operations (FLOPs) is another critical metric for assessing model complexity. Within this sub-track, VEPG_C, XUPTBoys, and HannahSR secured the top three positions with FLOPs of 3.13G, 3.39G, and 3.75G, respectively. The competitiveness of this sub-track is further confirmed by the close scores of 1.45, 1.50, and 1.57, aligned with the parameter evaluation results. 
Remarkably, the same models top both the parameters and FLOPs evaluations, demonstrating consistent performance across different complexity metrics. Similar to the parameters sub-track, the extended runtimes of these methods point to a need for further research and optimization. Key implications include: i) Efficiency vs. Performance Trade-off: The close competition among the top models in terms of parameters and FLOPs suggests a significant trade-off between model efficiency and performance. Despite achieving minimal parameter counts and FLOPs, the high runtimes indicate that these models might be optimizing computational complexity at the expense of execution speed. This raises important considerations for future research in balancing efficiency with real-world usability, especially in applications where inference speed is critical. ii) Potential for Model Optimization: The consistency in ranking between the parameters and FLOPs sub-tracks reveals that models which are optimized for one aspect of computational efficiency tend to perform well in others. However, the noted high runtimes across these models suggest an untapped potential for holistic model optimization. Future work could focus on integrating more advanced optimization techniques or exploring novel architectural innovations to enhance both the computational efficiency and runtime performance. + +Overall Evaluation. The final assessment of performance employs a comprehensive metric that synthesizes runtime, FLOPs, and the number of parameters into a unified score. In this rigorous evaluation, the EMSR Group excelled, claiming the prestigious top position, followed by XiaomiMM (the winner of the NTIRE ESR 2024 challenge) and ShannonLab in second and third places, respectively. This achievement highlights the sophisticated engineering and innovative approaches implemented by these groups. + +Contrasting with the previous year, where runtime heavily influenced overall rankings, this year presents a shift. 
The best performer in runtime only secured third place in the overall competition. Specifically, EMSR, the overall winner, ranked fifth in runtime, sixth in parameters, and seventh in FLOPs. Similarly, XiaomiMM, which came second overall, was fourth in runtime, eleventh in parameters, and thirteenth in FLOPs. This demonstrates that: i) A balanced + +Table 1. Results of Ninth NTIRE 2025 Efficient SR Challenge. The performance of the solutions is compared thoroughly from three perspectives including the runtime, FLOPs, and the number of parameters. The underscript numbers associated with each metric score denote the ranking of the solution in terms of that metric. For runtime, “Val.” is the runtime averaged on DIV2K_LSDIR_valid validation set. “Test” is the runtime averaged on a test set with 200 images from DIV2K_LSDIR_test set, respectively. “Ave.” is averaged on the validation and test datasets. “#Params” is the total number of parameters of a model. “FLOPs” denotes the floating point operations. Main Track combines all three evaluation metrics. The ranking for the main track is based on the score calculated via Eq. 2, and the ranking for other sub-tracks is based on the score of each metric via Eq. 1. Please note that this is not a challenge for PSNR improvement. The “validation/testing PSNR” is not ranked. For all the scores, the lower, the better. + +
TeamsPSNR [dB]Runtime [ms]#Params [M]FLOPs [G]Sub-Track ScoresMain-Track
Val.TestVal.TestAve.Runtime#ParamsFLOPsOverall ScoreRanking
EMSR26.9226.9910.2689.7209.9940.1318.542.46(5)2.58(6)2.78(7)2.531
XiaomiMM26.9227.009.9589.1329.5450.1489.682.36(4)2.92(11)3.19(13)2.572
ShannonLab26.9027.008.9388.3028.6200.17211.232.18(1)3.48(17)3.84(18)2.623
TSSR26.9027.029.8128.8989.3550.16410.692.32(2)3.28(15)3.60(16)2.664
Davinci26.9227.0011.4269.87610.6510.1469.552.61(6)2.88(9)3.14(11)2.735
SRCB26.9227.0011.4129.96010.6860.1469.552.62(7)2.88(9)3.14(11)2.746
Rochester26.9427.0111.93410.45411.1940.15810.302.74(8)3.14(14)3.43(14)2.917
mbga26.9027.009.8229.2089.5150.19212.562.36(3)4.02(19)4.50(20)2.938
IESR26.9026.9913.76012.58213.1710.1438.323.28(10)2.82(7)2.71(6)3.129
ASR26.9027.0013.86411.98412.9240.1549.063.21(9)3.05(12)2.96(8)3.1510
VPEG_O26.9026.9916.35613.92615.1410.1459.423.92(12)2.86(8)3.09(9)3.6311
mmSR26.9527.0514.45012.03613.2430.21213.853.30(11)4.65(21)5.25(23)3.8012
ChanSR26.9227.0316.73815.59216.1650.21011.594.29(16)4.58(20)4.01(19)4.2913
Pixel Alchemists26.9027.0117.32214.60815.9650.21312.934.22(14)4.68(22)4.70(21)4.3614
MiSR26.9027.0217.05614.98816.0220.21313.864.24(15)4.68(22)5.26(24)4.4615
LZ26.9027.0116.98015.45016.2150.25216.424.31(17)6.21(25)7.15(25)5.0216
Z626.9026.9920.36216.18418.2730.30318.705.19(20)8.99(27)9.39(27)6.3917
TACO_SR26.9427.0517.82815.65216.7400.34220.034.52(18)11.92(30)11.01(30)6.6118
AIOT_AI26.9027.0019.83618.15818.9970.30119.565.54(21)8.86(26)10.41(28)6.7719
JNU62026.9027.0120.68818.28219.4850.32520.315.79(22)10.54(29)11.39(31)7.3420
LVGroup_HFUT26.9627.0716.39414.87615.6350.42627.874.09(13)21.91(33)28.15(34)10.3821
SVM26.9227.0430.61028.13429.3720.25113.3914.13(23)6.16(24)4.97(22)11.5622
YG26.9227.0433.65831.61432.6360.0935.8218.96(24)1.96(5)2.01(5)13.8723
NanoSR26.9727.0817.93016.30017.1150.55136.024.68(19)54.20(35)74.72(35)22.6124
MegastudyEdu Vision AI27.0127.1339.37637.52838.4520.16910.6332.03(25)3.40(16)3.57(15)23.4725
XUPTBoys26.9127.0350.56435.01242.7880.0723.3947.36(26)1.68(3)1.50(2)33.6326
MILA26.9027.0244.36242.03443.1980.0874.9349.14(27)1.88(4)1.80(4)34.9527
AiMF_SR26.9827.1046.59443.09244.8430.1809.4857.00(28)3.69(18)3.11(10)40.9228
EagleSR27.0427.1647.73045.19246.4610.35221.8965.95(29)12.82(31)13.76(32)50.1529
BVIVSR26.9726.9949.48846.79848.1430.15510.7976.75(30)3.07(13)3.64(17)54.7330
HannahSR26.9027.0258.28641.42249.8540.0603.7589.55(31)1.54(2)1.57(3)63.1531
VPEG_C26.9027.0060.04640.95050.4980.0443.1394.90(32)1.38(1)1.45(1)66.8632
CUIT_HT27.0927.2062.03859.10660.5720.30919.75235.36(33)9.39(28)10.65(29)167.7633
GXZY AI27.0127.13102.92499.102101.0130.42825.889.02e3(34)22.23(34)22.18(33)6.32e334
SCMSR26.9227.00133.866114.088123.9770.39317.627.15e4(35)17.25(32)8.25(26)5.01e435
IPCV27.2727.40366.924357.268362.0960.86665.661.51e14(36)531.32(37)2.60e3(36)1.05e1436
X-L27.0727.21525.966479.346502.6560.96670.834.81e19(37)1.10e3(38)4.83e3(37)3.36e1937
Quantum Res27.2927.40574.632558.934566.7830.79076.091.56e22(38)306.32(36)9.07e3(38)1.09e2238
The following methods are not ranked since their validation/testing PSNR (underlined) is not on par with the threshold.
SylabSR24.3624.4628.58024.82626.7030.0727.9011.111.682.588.41-
NJUPCA26.7026.8070.20252.93261.5672.30830.11257.451.83e736.822.75e6-
DepthIBN26.5626.6639.15436.87638.0150.1217.7130.802.402.5222.30-
Cidaut AI26.8626.9527.22024.97426.0970.21012.8310.524.584.658.75-
IVL26.6626.7618.74616.94417.8450.24015.645.005.696.515.33-
Baseline26.9327.0123.91220.45422.1830.27616.77.397.397.397.39-
+ +approach to model design, optimizing across multiple metrics rather than focusing on a single aspect, is becoming crucial in competitive evaluations. ii) Achieving top performance in one metric does not guarantee similar success in overall rankings, underscoring the complexity of model optimization in real-world scenarios. This year's goal was to encourage a balanced pursuit of speed and efficiency, a challenge that has evidently led to significant innovations and advancements in model design. + +PSNR. Team Quantum Res, IPCV, X-L, and CUIT_HTT demonstrate superior PSNR values, a critical evaluation metric in super-resolution. Specifically, Quantum Res and IPCV lead with an exceptional 27.40 dB, closely followed by X-L with 27.21 dB, and CUIT_HTT at 27.20 dB on the DIV2K_LSDIR_test set. Despite these impressive perfor + +mances, it is essential to emphasize that the primary focus of this challenge is on efficiency in super-resolution. Accordingly, we have adjusted the PSNR criteria, setting rigorous lower thresholds of 26.90 dB and 26.99 dB for the DIV2K_LSDIR_valid and DIV2K_LSDIR_test sets, respectively. This adjustment is designed to prioritize a balance between high performance and computational efficiency. A commendable total of 38 teams met this adjusted benchmark, demonstrating their capability to effectively balance image quality with efficiency. However, teams like IVL, Cidaut AI, SylabSR DepthIB, and NJUPCA, while notable for their efficiency, did not achieve the required PSNR levels. This highlights the ongoing challenge of optimizing super-resolution processes that meet both efficiency and performance standards, underscoring the complex nature of + +advancements in this field. + +# 3.1. Main Ideas + +Throughout this challenge, several techniques have been proposed to enhance the efficiency of deep neural networks for image super-resolution (SR) while striving to maintain optimal performance. 
The choice of techniques largely depends on the specific metrics that a team aims to optimize. Below, we outline some typical ideas that have emerged: + +- Distillation is an effective manner to maintain the PSNR performance without increasing computation cost during inference. The team EMSR added only the ConvLora-Like [7] operation into the base model. Similarly, team ESPAN also proposed to use the self-distillation for progressive learning strategy validated from [42]. +- Re-parameterization [22] [24, 126] is commonly used in this challenge. Usually, a normal convolutional layer with multiple basic operations ( $3 \times 3$ convolution, $1 \times 1$ operation, first and second-order derivative operators, skip connections) is parameterized during training. During inference, the multiple operations that reparameterize a convolution could be merged back into a single convolution. e.g., Some top teams (i.e., XiaomiMM, mmSR, HannahSR, etc) used this operation in their methods. +- Parameter-free attention mechanism is validated as a useful technique to enhance computational efficiency [24, 126]. Specifically, XiaomiMM proposed a swift parameter-free attention network based on parameter-free attention, which achieves the lowest runtime while maintaining a decent PSNR performance. +- Incorporating multi-scale information and hierarchical module design are proven strategies for effectively fusing critical information. For instance, solutions such as HannahSR, XuPTBoys, and ChanSR have successfully utilized multi-scale residual connections and hierarchical module designs to enhance their performance. +- Network pruning plays an important role. It is observed that a couple of teams (i.e., ASR, Davinci) used network pruning techniques to slightly compress a network. This leads to a more lightweight architecture without a heavy performance drop. +- Exploration with new network architectures is conducted. 
Besides the common CNN or Transformers, the state space model (i.e., vision mamba [30, 32]) was tried by GXZY.AI in this challenge, which was also validated in the last NTIRE ESR challenge [91]. +- Various other techniques are also attempted. Some teams also proposed solutions based on neural architecture search, vision transformers, frequency processing, multi-stage design, and advanced training strategies. + +# 3.2. Fairness + +To ensure the integrity and fairness of the Efficient SR Challenge, we meticulously established a set of rules focusing on the permissible datasets for training the models. Participants were allowed to augment their training with external datasets, such as Flickr2K, to promote diverse and comprehensive model training experiences. However, to guarantee an unbiased evaluation, the use of additional DIV2K and LSDIR validation sets, which include both high-resolution (HR) and low-resolution (LR) images, was explicitly prohibited during the training phase. This restriction aimed to maintain the validation set's integrity as a vital benchmark for assessing the proposed networks' performance and generalizability. Moreover, using LR images from the DIV2K and LSDIR test sets for training was strictly forbidden, ensuring the test dataset's purity and upholding the evaluation process's integrity. Lastly, the adoption of advanced data augmentation techniques during training was encouraged as a fair practice, allowing participants to enhance their models within the defined rules and guidelines. + +# 3.3. Conclusions + +The analysis of the submissions to this year's Efficient SR Challenge allows us to draw several important conclusions: + +- Firstly, the competition within the image super-resolution (SR) community remains intense. This year, the challenge attracted 244 registered participants, with 43 teams making valid submissions. All proposed methods have enhanced the state-of-the-art in efficient SR. 
Notably, the competition among the top three teams has intensified, with last year's winner ranking second this year. +- Secondly, unlike in previous challenges, dominance in runtime no longer characterizes the top-ranking teams. Instead, more balanced solutions that consider all aspects of performance are proving to be more beneficial. +- Thirdly, consistent with the success of deep learning techniques like DeepSeek, the distillation approach has significantly contributed to performance improvements without adding computational complexity. +- Fourthly, re-parameterization and network compression have emerged as crucial techniques in enhancing efficiency in SR. Ongoing exploration in these areas is encouraged to further boost efficiency. +- Fifthly, the use of large-scale datasets, such as the one described in [64], for pre-training has been shown to enhance accuracy significantly. Typically, training incorporates multiple phases, gradually increasing the patch size and decreasing the learning rate, optimizing the training process. +- Sixthly, this year's challenge saw the introduction of the state space model, presenting a novel approach that may influence future research directions in the field. + +Overall, by considering factors like runtime, FLOPs, + +and parameter count simultaneously, it is feasible to design models that optimize across multiple evaluation metrics. Finally, as computational capabilities continue to evolve, the focus on optimizing models for runtime, FLOPs, and parameter efficiency becomes increasingly vital. With advancements in both hardware and software, we expect the development of more sophisticated and efficient models in the super-resolution domain. The pursuit of efficiency in SR is likely to remain a key driver of innovation, promising exciting advancements and continual progress in the field. + +# 4. Challenge Methods and Teams + +# 4.1. EMSR + +Method. The overall architecture of the team EMSR is shown in Fig. 
1, which is based on the leading efficient super-resolution method SPAN [112]. Inspired by ConvLora [7], the team proposes SconvLB, which incorporates ConvLora into SPAB to improve performance without increasing computation complexity. Specifically, given a pre-trained convolutional layer in SPAB, they update it by adding Lora layers, and representing it with a low-rank decomposition: + +$$ +W _ {\text {C o n v L o r a}} = W _ {P T} + X Y, \tag {3} +$$ + +where $W_{ConvLora}$ denotes the updated weight parameters of the convolution, $W_{PT}$ denotes the original pre-trained parameters of the convolution, $X$ is initialized by random Gaussian distribution, and $Y$ is zero in the beginning of training. Note that the Lora weights can be merged into the main backbone. Therefore, ConvLoras don't introduce extra computation during inference. + +They adopt the pre-trained SPAN-Tiny model [112] with 26 channels. They replace the SPAB in SPAN with our proposed SconvLB, and also add ConvLora into the pixel shuffle block and the convolution before it. During training, they freeze the original weight and bias of the convolution and only update the Lora parameters. + +Optimization. To supervise the optimization of SconvLB, they adopt a knowledge-based distillation training strategy. They adopt spatial affinity-based knowledge distillation [37] to transfer second-order statistical info from the teacher model to the student model by aligning spatial feature affinity matrices at multiple layers of the networks. Given a feature $F_{l} \in R^{B \times C \times W \times H}$ extracted from the $l$ -th layer of the network, they first flatten the tensor along the last two dimensions and calculate the affinity matrix $A_{\text{spatial}}$ . 
Then the spatial feature affinity-based distillation loss can be formulated as: + +$$ +L _ {A D} = \frac {1}{| A |} \sum_ {l = 1} ^ {n} \left\| A _ {l} ^ {S} - A _ {l} ^ {T} \right\| _ {1}, \tag {4} +$$ + +where $A_{l}^{S}$ and $A_{l}^{T}$ are the spatial affinity matrix of student and teacher networks extracted from the feature maps of the $l$ -th layer, respectively. $|A|$ denotes the number of elements in the affinity matrix. Specifically, the team applies the distillation loss after each SconvLB. + +Except for the distillation loss in the feature space, the team applies a pixel-level distillation loss: + +$$ +L _ {T S} = \left\| \mathcal {T} \left(I _ {L R}\right) - \mathcal {S} \left(I _ {L R}\right) \right\| _ {1}, \tag {5} +$$ + +where $\mathcal{T}$ and $S$ denote the teacher network and the student network, respectively. $I_{LR}$ denotes the LR image. + +They also apply the $L_{2}$ loss: + +$$ +L _ {r e c} = \left\| I _ {H R} - \mathcal {S} \left(I _ {L R}\right) \right\| _ {2} ^ {2}, \tag {6} +$$ + +where $I_{HR}$ denotes the ground truth high-resolution image. The overall loss is: + +$$ +L _ {t o t a l} = \lambda_ {1} L _ {r e c} + \lambda_ {2} L _ {T S} + \lambda_ {3} L _ {A D}. \tag {7} +$$ + +Training Details. The team uses DIV2K and LSDIR for training. Random flipping and random rotation are used for data augmentation. The training process is divided into two stages. + +1. Stage One: HR patches of size $192 \times 192$ are randomly cropped from HR images, and the mini-batch size is set to 8. The model is trained by minimizing the $L_{total}$ mentioned above with the Adam optimizer. The learning rate is $1 \times 10^{-4}$ . A total of $30k$ iterations are trained. +2. Stage Two: In the second stage, the team increases the size of the HR image patches to $256 \times 256$ , with other settings remaining the same as in the first stage. 
+ +Throughout the entire training process, they employ an Exponential Moving Average (EMA) strategy to enhance the robustness of training. + +# 4.2. XiaomiMM + +Method Details. The team proposes an accelerated variant of the Swift Parameter-free Attention Network (SPAN) [112], called SPANF, which is built upon the fundamental SPAB block. To enhance the inference speed, SPANF introduces several key modifications compared to the original SPAN model. Firstly, they remove the last SPAB block, which reduces computational complexity without significantly impacting performance. Secondly, they increase the number of channels to 32, providing a better balance between model capacity and speed. Thirdly, they replace the first convolution layer with a nearest neighbor upsampling operation, which is computationally less intensive and accelerates the upsampling process. Lastly, they implement simple modifications to the shortcut connections within the network to further streamline computations. These changes collectively enable SPANF to achieve faster + +![](images/8ac9e00d1996213e6f79f7b908791efe8ef055eead3451ed8eaed1fab8097e08.jpg) +Figure 1. Team EMSR: The team incorporates ConvLoras into the network to increase the performance without adding extra complexity. + +![](images/aad7982650cb376bfab88d1f41e6514c27a63d4ddf9e751f866833b7ce6411d9.jpg) +Figure 2. The proposed SPANF architecture. The main structure is basically the same as SPAN [112], but one SPAB module is reduced, and the number of channels is 32. + +inference speeds while maintaining competitive image quality. The evaluations on multiple benchmarks demonstrate that SPANF not only upholds the efficiency of SPAN's parameter-free attention mechanism but also offers superior speed, making it highly suitable for real-world applications, particularly in scenarios with limited computational resources. + +Implementation Details. The dataset utilized for training comprises of DIV2K and LSDIR. 
During each training batch, 64 HR RGB patches are cropped, measuring $256 \times 256$ , and subjected to random flipping and rotation. The learning rate is initialized at $5 \times 10^{-4}$ and undergoes a halving process every $2 \times 10^{5}$ iterations. The network undergoes training for a total of $10^{6}$ iterations, with the L1 loss function being minimized through the utilization of the Adam optimizer [54]. They repeated the aforementioned training settings four times after loading the trained weights. Subsequently, fine-tuning is executed using the L1 and L2 loss functions, with an initial learning rate of $1 \times 10^{-5}$ for $5 \times 10^{5}$ iterations, and HR patch size of 512. They conducted finetuning on four models utilizing both L1 and L2 losses, and employed batch sizes of 64 and 128. Finally, they integrated these models' parameters to obtain their ultimate model. + +# 4.3. ShannonLab + +Method. The method proposed by the team draws inspiration from ECBSR and SPAN. First, they optimized the ECB module by introducing a 1x1 convolutional layer for channel expansion before the input tensor enters the ECB module. After processing, another 1x1 convolution restores the original channel dimensions, while incorporating residual connections. During inference, these components can be merged into a standard 3x3 convolution through reparameterization, thereby enhancing the ECB module's effectiveness without increasing computational overhead. As illustrated in Fig. 3, the complete model architecture of TSR comprises a shallow feature extraction convolution, a reconstruction convolution, a PixelShuffle module, and four REECB blocks, each made of stacked optimized ECBs. + +Training Details. The model is trained on the DIV2K and LSDIR train dataset with random flipping and rotation applied for data augmentation. The Adam optimizer is consistently employed throughout the training process. The entire training process is divided into five steps. + +1. 
HR patches of size $256 \times 256$ are randomly cropped from HR images, and the mini-batch size is set to 32. L1 loss is used and the initial learning rate is set to 5e-4, with a cosine learning rate decay strategy. The total iterations is 500k. +2. HR patches of size $256 \times 256$ are randomly cropped from HR images, and the mini-batch size is set to 32. L1 and L2 loss is used and the initial learning rate is set to 5e-4, with a cosine learning rate decay strategy. The total iterations is 1000k. +3. HR patches of size $512 \times 512$ are randomly cropped from HR images, and the mini-batch size is set to 64. L2 + +![](images/d8575e44d65aceb2fd23b5bd961f7c359c9fc2f3717d1291c9ae893c01490dc3.jpg) +Figure 3. Team ShannonLab: The pipeline of TSR. + +loss is used and the initial learning rate is set to 2e-4, with a cosine learning rate decay strategy. The total iterations is $1000\mathrm{k}$ . + +4. HR patches of size $512 \times 512$ are randomly cropped from HR images, and the mini-batch size is set to 64. L2 loss is used and the initial learning rate is set to 1e-4, with a cosine learning rate decay strategy. The total iterations is 1000k. +5. HR patches of size $512 \times 512$ are randomly cropped from HR images, and the mini-batch size is set to 64. L2 loss is used and the initial learning rate is set to 1e-5, with a cosine learning rate decay strategy. The total iterations is 1000k. + +# 4.4. TSSR + +Method. They combined the ideas of reparameterization and attention mechanism to design a model that can capture image information in the network and effectively achieve image super-resolution. + +Training Details. The training process is divided into three steps. + +1. HR patches of size $256 \times 256$ are randomly cropped from HR images, and the mini-batch size is set to 64. L1 loss with AdamW optimizer is used and the initial learning rate is set to 0.0005 and halved at every 100k iterations. The total iterations is 500k. +2. 
HR patches of size $256 \times 256$ are randomly cropped from HR images, and the mini-batch size is set to 64. L1 and L2 loss with AdamW optimizer is used and the initial learning rate is set to 0.0002 and halved at every 100k iterations. The total iterations is 1000k. +3. HR patches of size $512 \times 512$ are randomly cropped from HR images, and the mini-batch size is set to 64. L2 loss with AdamW optimizer is used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 1000k. + +# 4.5. mbga + +Architecture. The team proposes the ESPAN, which is based on SPAN [111]. Through evaluations of depth-channel combinations in SPAN on an A6000 GPU, they determined that setting the number of channels to 32 yields higher efficiency than 28 channels. To reduce parameters and FLOPs, a depth of 6 was adopted. Additionally, a $9 \times 9$ convolution replaced the conventional $3 \times 3$ convolution at the network's input stage since they find that $9 \times 9$ convolution is faster than $3 \times 3$ convolution on A6000. + +![](images/8625a75f76c2da91bb4d2e4dae9cd14a3e706d54d637ec9410ab4e46f76d0fe9.jpg) +Figure 4. Team mbga: General Reparameterization. + +![](images/ffaeb29db7fde13f35fcbf1cb39fb49d6fad1f29499bad6f15b795904e69fbdc.jpg) +Figure 5. Team mbga: ESPAN with self distillation. + +General Reparameterization. Inspired by MobileOne [107] and RepVGG [23], the team proposes a generalized reparameterization block (Fig. 4). The block consists of four $1 \times 1 - 3 \times 3$ convolution branches, one $1 \times 1$ convolution branch, and one $3 \times 3$ convolution branch. Skip connections are omitted due to empirical observations of training instability. While additional duplicated branches or $3 \times 3 - 1 \times 1$ convolution branches are feasible, the current configuration is found to offer superior performance consistency during optimization. + +Self distillation and progressive learning. 
Inspired by RIFE [42], self-distillation is incorporated into their training pipeline. The teacher model shares the identical backbone as the student model but includes three extra SPAB blocks appended to the student's backbone (Fig. 5). A self-distillation loss similar to RIFE's formulation is adopted to co-train the teacher and student networks. This design enables the teacher model to learn robust backbone features. After the distillation phase, the student loss and distillation loss components are removed, and the entire teacher model is fine-tuned. Leveraging the pre-trained robust teacher, progressive learning is employed: the extra SPAB blocks are gradually removed from the teacher's backbone, finally resulting in an architecture identical to the original student model. + +Frequency-Aware Loss. Since small models have limited parameters, during training, they should make the model fo + +cus more on important (or difficult) areas. In their methods, two types of frequency-aware losses are employed. The first type is the DCT loss. They use the discrete cosine transform (DCT) to convert the RGB domain to the frequency domain and then apply the L1 loss to calculate the difference. The other type is the edge loss. They add a blur to the image and then subtract the blurred image from the original one to obtain the high frequency area. Subsequently, the L1 loss is calculated on this high frequency area. + +Training details: The training process contains two stages. And the training dataset is the DIV2K_LSDIR_train. General reparameterization is used on the whole process. + +I. At the first stage, they use self distillation to train the teacher model. + +- Step1. The team first trains a 2x super-resolution model. HR patches of size 256x256 are randomly cropped from HR images, and the mini-batch size is set to 64. L1 loss and self distillation loss with AdamW optimizer are used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. 
The total iterations is 500k. This step is repeated twice. And then they follow the same training setting and use 2x super-resolution model as pretrained model to train a 4x super-resolution model. This step is repeated twice. +- Step2. HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. MSE loss, frequency-aware loss and self distillation loss with AdamW optimizer are used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 500k. This step is also repeated twice. +- Step3. They only train the teacher model. HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. MSE loss and frequency-aware loss with AdamW optimizer are used and the initial learning rate is set to 0.00005 and halved at every 100k iterations. The total iterations is 500k. This step is also repeated twice. + +II. At the second stage, they use progressive learning to get the final student model. + +- Step4. They drop the additional SPAB block one by one. HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. L1 loss with AdamW optimizer are used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 500k. +- Step5. They repeat the following training process many times until convergence. HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. MSE loss and frequency-aware loss with AdamW optimizer are used and the initial learning rate is set to 0.00005 and halved at every 100k iterations. The total iterations is 500k. + +# 4.6. VPEG_C + +General Method Description. As illustrated in Fig. 6, they propose a Dual Attention Network (DAN) for the lightweight single-image super-resolution task. 
The core components of DAN consist of three parts: a Local Residual Block (LRB), a Spatial Attention Block (SAB), and a Channel Attention Block (CAB). + +Local Residual Block (LRB). They leverage the $1 \times 1$ convolution layers followed by a $3 \times 3$ depthwise convolution as the basic unit, repeated three times. Specially, GELU activation is applied on each layers, and the features are passed in a densely connected manner. At the end of the block, feature maps from different levels are aggregated using channel concatenation, effectively capturing local image details. + +Spatial Attention Block (SAB). They adopt the spatial attention design of SMFANet [144], which employs a variance-constrained feature modulation mechanism to aggregate spatial feature. This allows efficient spatial interaction with minimal computational cost. + +Channel Attention Block (CAB). Global channel-wise information is modeled through a self-gating mechanism that enhances local representations and increases model non-linearity. This is followed by a key-value shared MDTA [132] for global interaction and a GDFN [132] for feature refinement. + +Training Description. The proposed DAN consists of 6 feature mixing modules with 16 channels. The training process is divided into two stages: + +1. Pre-training Stage: They pre-train DAN using 800 images from the DIV2K [100] and the first 10K images of the LSDIR [64] datasets. The cropped LR image size is $72 \times 72$ , and the mini-batch size is set to 64. The DAN is trained by minimizing L1 loss and the frequency loss[14] with Adam optimizer for total 800, 000 iterations. The initial learning rate is set to 2e-3 and halved at 200K, 400K, 600K, 700K. +2. Fine-tuning Stage: They fine-tune the model on the 800 images of DIV2K [100] and the first 10K images of the LSDIR [64] datasets. The cropped LR image size is $72 \times 72$ , and the mini-batch size is set to 64. 
The DAN is trained by minimizing PSNR loss with the Adam optimizer for a total of 200,000 iterations. They set the initial learning rate to 5e-4 and halve it at 50K, 100K, 150K, and 175K. + +# 4.7. XUPTBoys + +General Method Description. The XUPTBoys team proposed the Frequency-Guided Multilevel Dispersion Network (FMDN), as shown in Fig. 7. FMDN adopts a similar basic framework to [45, 67, 71, 81]. + +Based on the above analysis, they propose the new Frequency-Guided Multi-level Dispersion Block (FMDB) and the new Frequency-Guided Multi-level Dispersion + +![](images/36311b6faf268e669dcd643228db99b83d29abb2a47e421a048368d4aa625818.jpg) +Figure 6. Team VPEG_C: An overview of the DAN. + +![](images/bd994d0588e6812fed53823ee522107759f02c0a3d97cd1a32939c53e159da5f.jpg) +Figure 7. Team XUPTBoys: The whole framework of Frequency-Guided Multi-level Dispersion Network (FMDN). + +Block Basic (FMDB-B) as the base block of FMDN. As shown in Fig. 8, they use Hierarchical Variance-guided Spatial Attention (HVSA), Reallocated Contrast-Aware Channel Attention (RCCA) as alternatives to Enhanced Spatial Attention (ESA) [73] and Contrast-Aware Channel Attention (CCA) [44], Frequency-Guided Residual block (FRB), Asymmetric FeedForward Network (AFFN), Multilevel Residual Convolution (MRConv) and Multilevel Residual Convolution Basic (MRConv-B). The difference between FMDB and FMDB-B is that the former uses MRConv, while the latter uses MRConv-B. + +In HVSA, the effects of multilevel branching and local variance on performance are examined. Small-window multilevel branches fail to capture sufficient information, while local variance within a single branch can create significant weight disparities. To address these issues, [81] was enhanced to introduce the D5 and D7 branches, which effectively utilize local variance to capture information-rich regions while balancing performance and complexity. 
In RCCA, this approach improves the traditional channel attention mechanism by not only reallocating weights across channels but also better managing shared information among them. Introduces complementary branches with $1 \times 1$ convolutions and GELU activation functions, which help redistribute complementary information, improving the uniqueness of each channel. In FRB, it enhances feature representation using convolutional layers and GELU activation. It normalizes input, extracts features with depthwise convolutions of different kernel sizes, and combines them through residual connections to preserve spatial information for effective image processing. In AFFN, it applies layer normalization and a $1 \times 1$ convolution to expand feature dimensions. It then uses two depthwise convolutions with different kernel sizes, combines the results with GELU activation, and projects the output back to the original dimension with a residual connection. In MRConv and + +![](images/44d502a6c0ed805d464c3c6f19148e6c1e459ecc1aa0d51d667b715fea387a24.jpg) + +![](images/6ea31666372ce72972d1e01c9f83342103a7a680943cd9cd751793bd9f5c5350.jpg) +(f) MRCov-B + +![](images/c2192d342d64556a1a2860e014da3529e5c2dc30bbd7246a3a3f93bc4b03ebb7.jpg) +(b) HVSA + +![](images/d0ea7d9b2ecaf56aa13a4f1b039a13ff28f10a58d64cc690912ef0f4e806004e.jpg) +(c) FRB + +![](images/b758339bd61eafff580164a4527468d8f542327ebe7a1f14835a8397e41c3165.jpg) +(d) AFFN +Figure 8. Team XUPTBoys: The details of each component. 
(a) FMDB: Frequency-Guided Multi-level Dispersion Block; (b) HVSA: Hierarchical Variance-guided Spatial Attention; (c) FRB: Frequency-Guided Residual Block; (d) AFFN: Asymmetric FeedForward Network; (e) RCCA: Reallocated Contrast-aware Channel Attention; (f) MRConv-B/MRConv: Multilevel Residual Convolution Basic and Multilevel Residual Convolution + +![](images/b48235d2ae7a52f0f87af13e385c348076ea1d6bd0051f75995284647bdfd624.jpg) +(e) RCCA + +MRConv-B, MRConv and MRConv-B use convolution kernels of different sizes for parallel convolution, and finally activate the output using GELU and combine it with residual connections, effectively preserving spatial information. + +Training Description. The proposed FMDN has 3 FMDB-Basic blocks and 1 FMDB block, in which the number of feature channels is set to 24. The details of the training steps are as follows: + +1. Pretraining on the DIV2K [102] and and Flickr2K [70]. HR patches of size $256 \times 256$ are randomly cropped from HR images, and the mini-batch size is set to 64. The model is trained by minimizing the L1 loss function [77] with the Adam optimizer [53]. The initial learning rate is set to $2 \times 10^{-3}$ and halved at $\{100k, 500k, 800k, 900k, 950k\}$ -iteration. The total number of iterations is $1000k$ . +2. Finetuning on 800 images of DIV2K and the first 10k images of LSDIR [64]. HR patch size and mini-batch size are set to $384 \times 384$ and 64, respectively. The model is fine-tuned by minimizing L2 loss function [77]. The initial learning rate is set to $5 \times 10^{-4}$ and halved at $\{500k\}$ -iteration. The total number of iterations is + +1000k. + +# 4.8. HannahSR + +General Method Description. The architecture of the proposed network is depicted in Fig. 9, which is inspired by previous studies such as AGDN [114], MDRN [80] and SPAN [109]. They propose a Multi-level Refinement and Bias-learnable Attention dual branch Network (MRBAN). 
More specifically, they build upon the AGDN framework by constructing another branch consisting of one $3 \times 3$ convolution layer (ISRB) and one $1 \times 1$ convolution layer to enhance the overall performance in a learnable way. Meanwhile, they replace the concat module in the AGDN with a direct element-wise summation, for the sake of harvesting significant savings of the parameters. + +In addition, they propose the multi-level refinement and bias-learnable attention block (MRBAB) as the basic block of our network. As described in Figure 10, they attempt to minimize the information loss induced by Sigmoid module. When confronted with a negative input with a large absolute value, the output of the Sigmoid module will be approximately equal to zero, which results in remarkable + +![](images/083bbb9f17a8948f4ae5b792fe6c480e191cd46a4ccd5b188e6f518badad2507.jpg) +Figure 9. Team HannahSR: The overall architecture of Multi-level Refinement and Bias-learnable Attention Dual Branch Network (MR-BAN). + +![](images/39e5ebdf23d3857b266c36552ee68e47923ad4ffcfb55aa17c77ee301d9f96b3.jpg) +(a) Team HannahSR: The MRBAB architecture. + +![](images/308e52fdb5c90b5da45b454cd7914587960c012e3e6058e86a7865d419d13375.jpg) +(b) Team HannahSR: The MRBA architecture. +Figure 10. Team HannahSR: The detailed architecture of the network MRBAN. (a) MRBAB: Multi-level Refinement and Bias-learnable Attention Block; (b) MRBA: Multi-level Refinement and Bias-learnable Attention; Other components: BSRB: Blueprint Shallow Residual Block [66]; BSConv: Blueprint Separable Convolution [66]; RCCA: Reallocated Contrast-aware Channel Attention [114]; SGSA: Sparse Global Self-attention [114]. + +information loss. To address this issue, SPAN [109] used an origin-symmetric activation function. They added a bias of $-0.5$ to the Sigmoid function, which allowed the information carried by negative inputs to be taken into account. 
+ +However, when dealing with the larger positive inputs, their outputs would be approximately equal to 0.5. When compared with the original 1.0, they inevitably suffered from significant information loss. To tackle this issue, they set the + +negative bias as a learnable parameter so that it can be updated dynamically during the training process to optimally boost the accuracy performance. + +Eventually, they adopt the reparameterization technique. They replace the first $3 \times 3$ convolution layer with identical scale reparameterization block to extract richer local features for supplying the following layers with more valuable information, while standardizing the number of channels to an identical scale for lightweight super resolution networks to prevent incurring inappropriate model capacity increments. + +Training Strategy. The proposed MRBAN consists of 4 MRBAB, and the feature channel is set to 32. They adopt a four-step training strategy. The details of the training steps are as follows: + +1. Pretraining on the DIV2K [2] and Flickr2K [69] datasets with the patch size of $256 \times 256$ and the mini-batch size is set to 64. The MRBAN is trained by minimizing the L1 loss function with the Adam optimizer. The initial learning rate is set to $3 \times 10^{-3}$ and halved at $\{100\mathrm{k}, 500\mathrm{k}, 800\mathrm{k}, 900\mathrm{k}, 950\mathrm{k}\}$ -iteration. The number of iterations is $1000\mathrm{k}$ . +2. Initial fine-tuning on DIV2K and the first 10K images of LSDIR [64]. The patch size is $384 \times 384$ and the minibatch size is set to 32. The model is trained by minimizing the MSE loss function. The initial learning rate is set to $1.5 \times 10^{-3}$ and halved at $\{100\mathrm{k}, 500\mathrm{k}, 800\mathrm{k}, 900\mathrm{k}, 950\mathrm{k}\}$ -iteration. The number of iterations is $1000\mathrm{k}$ . +3. Advanced training on the DIV2K and the whole LSDIR datasets. The patch size is $384 \times 384$ and the mini-batch size is set to 64. 
The model is trained by minimizing the MSE loss function. The initial learning rate is set to $8 \times 10^{-4}$ and halved at $\{100\mathrm{k}, 500\mathrm{k}, 800\mathrm{k}, 900\mathrm{k}, 950\mathrm{k}\}$ -iteration. The number of iterations is $1000\mathrm{k}$ . This stage can be repeated twice. +4. Final fine-tuning on the DIV2K and the whole LSDIR datasets. The patch size is $448 \times 448$ and the mini-batch size is set to 128. The model is trained by minimizing the MSE loss function. The initial learning rate is set to $5 \times 10^{-6}$ and halved at $\{100\mathrm{k}, 500\mathrm{k}, 800\mathrm{k}, 900\mathrm{k}, 950\mathrm{k}\}$ -iteration. The number of iterations is $1000\mathrm{k}$ . + +# 4.9. Davinci + +Final Solution Description. They chose the Swift Parameter-free Attention Network [112] as their base model, the winner of the NTIRE2024 ESR track. After trying the evolution pipeline mentioned in SwinFIR [133], the content decoupling strategy proposed in CoDe [31], the pre-training fine-tuning paradigm, and the model compression techniques such as model pruning and knowledge distillation discussed in Ref [51] respectively, they employ the model Pruning of the last layer with $l_{2}$ norm of the baseline and introducing the mixup Augmentation as their final + +![](images/de2cac9923bfa4f52967dd4330cfae9d5dfebaa792800e8bb8bb19662aebe5ea.jpg) +Figure 11. Team Rochester: They reduce the channel dimension from 48 to 28 from the original design and introduce additional convolution to stabilize the attention feature maps from SPAB blocks. Example input and output are adapted from [99]. + +proposal to preserve the original parameter distributions as much as possible, termed PlayerAug. + +Training Details. After pruning the SPAN, they train it on the DIV2K_LSDIR mixed training set, cropping the patch size to 512. The random rotation and flip are configured for data augmentation. 
The Adam [54] optimizer with $\beta_{1} = 0.9$ and $\beta_{2} = 0.99$ and the L1 loss function are adopted to optimize the models, and the mini-batch size is set to 32. All the experiments are conducted on 8 L40S GPUs. + +# 4.10. Rochester + +Method Details. The proposed method, ESRNet, is an improved and more efficient variant of last year's XiaomiMM SPAN network [112]. The original SPAN network demonstrated strong generation quality but required + +complex training tricks and model fusion strategies, making it difficult to reproduce and computationally expensive. In contrast, ESRNet achieves similar performance with significantly reduced computational overhead, enhanced training stability, and improved inference speed. + +Model Architecture. A key aspect of ESRNet's design is its ability to maintain high performance while reducing computational costs. As shown in Fig. 11, their modifications include: + +- Retaining the first six SPAN attention blocks as core feature extraction components while introducing a lightweight convolutional layer to refine the extracted feature maps before fusing them with the original features. This modification enhances feature representation while stabilizing the training process. +- Reducing the number of feature channels from 48 to 26, leading to a substantial decrease in both model parameters and floating-point operations (FLOPs). This reduction not only lowers GPU memory consumption but also improves inference efficiency without degrading performance. +- Improved validation speed, as ESRNet requires fewer computations per forward pass, making it more suitable for real-time applications compared with the baseline method. + +Overall, ESRNet has approximately half the number of parameters and FLOPs compared to the baseline EFPN network, yet it maintains a high PSNR score, demonstrating that their modifications achieve an excellent trade-off between efficiency and performance. + +Training Methodology. 
They train ESRNet on RGB image patches of size $256 \times 256$ , applying standard augmentation techniques such as random flipping and rotation to enhance generalization. To ensure stable convergence and optimal performance, they adopt a three-stage training strategy: + +1. Initial Feature Learning: They train the model with a batch size of 64 using Charbonnier loss, a robust loss function that mitigates the effects of outliers. The Adam optimizer is used with an initial learning rate of $2 \times 10^{-4}$ , which follows a cosine decay schedule. +2. Refinement Stage: They progressively decrease the learning rate linearly from $2 \times 10^{-4}$ to $2 \times 10^{-5}$ , allowing the model to refine its learned features while maintaining stable gradients. +3. Fine-Tuning with L2 Loss: In the final stage, they adopt L2 loss to fine-tune the model, further enhancing detail restoration. The learning rate is further reduced from $2 \times 10^{-5}$ to $1 \times 10^{-6}$ for smooth convergence. + +By structuring the training into these stages, they eliminate the need for complex training tricks used in previous approaches while achieving more stable and reliable optimization. + +One of the most significant advantages of ESRNet is its improved validation time due to its optimized architecture. Compared to the original SPAN network, ESRNet achieves a similar PSNR score while reducing computational complexity. The model requires significantly fewer FLOPs and parameters, leading to a noticeable reduction in inference time and GPU memory usage. This makes ESRNet a practical solution for applications requiring both high-quality generation and efficient computation. + +# 4.11.IESR + +Model Design. As for the Efficient Super-Resolution competition, they proposed the Inference Efficient Super-Resolution Net (IESRNet). IESRNet is not a specific network, but a bag of tricks to make a Super-Resolution Network infer more efficiently on a GPU. 
They will apply these tricks based on DIPNet [128], which won the first place on the NTIRE2023 ESR challenge in runtime track [65]. The specific structure of IESRNet is shown in Fig. 12. They will describe the tricks they used in detail below. + +1. Remove bias in Conv. The bias add of the convolution is a relatively inefficient operation in the convolution layer. It only occupies a small part of the FLOPs in the convolution, but occupies $15\%$ or more of the runtime. They removed the bias of all convolutional layers except the ESA module, and the PSNR loss was less than 0.01db. +2. Less Residual Connection. Although residual connection helps the model converge during training, too many residual structures will introduce many additional operations, reducing the inference efficiency of the model. Therefore, they replace the two middle RRFB in DIPNet with reparameterization no residual block(RNRB) to balance the trade-off between inference efficiency and model accuracy. +3. Standard number of Conv channels. Since the convolution operator has different performance optimizations for different configurations, generally, convolutions with a standard number of channels (such as 32, 48, and 64) are more deeply optimized and therefore occupy higher inference efficiency on the GPU. Based on NVIDIA V100 GPU testing, a 48-channel $3^{*}3$ convolution is even faster than a 30-channel convolution, although the FLOPs is over doubled. For this reason, they set the number of feature channels to 32, and the number of ESA channels to 16. +4. Efficient activation function. They replace all activation functions in the network with SiLU [27], which performs well in super-resolution tasks and significantly outperforms the RELU. In addition to its great performance, SiLU is also very fast when inferring on GPUs due to its computational characteristics. + +![](images/f7a459c9d7c6eda2acf426998d26a40c8bebbe250c1ca7ffe0217eb5634b2e71.jpg) +Figure 12. 
Team IRSR: The overview of the proposed IESRNet. The IESRNet is built based on DIPNet [128]. + +5. Reparameterization. They adopt re-parameterization to enhance the representation capabilities of the model. They use complex re-parameterization structures to train during training and merge them into regular convolutions during inference without incurring additional computational overhead. The specific rep-structure is shown in Fig. 12(c). + +Implementation Details. The training dataset consists of DIV2K and the first 15,000 images of LSDIR [64]. Random flipping and rotation are adopted for Data Augmentation. They adopt a multi-stage training paradigm to train their super-resolution network. The details of training steps are as follows: + +1. Initial training: HR patches of size $256 \times 256$ are randomly cropped from HR images. They set the mini-batch as 128. The model is trained by minimizing the PSNR loss with the Adam optimizer. The initial learning rate is set to 5e-4, and halved per 200k iterations. The total number of iterations is 1000k. +2. Warm-Start Training: Load the pre-trained weight and train it three times with the same setting. +3. Finetune with increasing patch size: In this process, the training patch size is progressively increased to improve the performance, which is selected from [384, 512, 640]. For each patch size, they finetune the network with $1000\mathrm{k}$ + +iterations. And the initial learning rate is correspondingly selected from [2e-4, 1e-4, 5e-5]. The batch size decreases to 64 for saving GPU memory. All experiments are conducted on 8 NVIDIA V100 GPUs. + +# 4.12. ASR + +Model Design. The network architecture is built based on DIPNet [128], which won the first place on the NTIRE2023 ESR challenge runtime track [65]. They made several modifications to make it more efficient while maintaining the excellent performance. They call it DIPNetSlim. + +First of all, they did not use pruning as DIPNet does. 
Although it can decrease the model parameters, it will degrade the inference speed of the model due to the irregular number of convolution channels. These operator configurations are not deeply optimized. For this reason, they set the number of feature channels to 32, and the number of ESA channels to 16. Second, they re-parameterize all 3x3 convolutional layers in the network. They adopt re-parameterization to enhance the expressiveness of the model. They use complex re-parameterization structures to train during training and merge them into regular convolutions during inference without incurring additional inference overhead. In addition, they changed the last convolution before the residual connection from 3x3 to 1x1, saving parameters while retaining the ability of feature normalization. Finally, they replace all activation functions in the network with SiLU [27], which performs well in super-resolution tasks and significantly outperforms ReLU. + +Implementation Details. The training dataset consists of DIV2K [103] and the first 15,000 images of LSDIR. The details of training steps are as follows: + +1. Initial Training: HR patches of size $256 \times 256$ are randomly cropped from HR images. They set the mini-batch as 128. The model is trained by minimizing the PSNR loss with the Adam optimizer. The initial learning rate is set to 5e-4, and halved per 200k iterations. The total number of iterations is 1000k. +2. Warm-Start Training: Load the pre-trained weight and train it three times with the same setting. +3. Finetune with increasing patch size: In this process, the training patch size is progressively increased to improve the performance, which is selected from [384, 512, 640]. For each patch size, they finetune the network with $1000k$ iterations. And the initial learning rate is correspondingly selected from [2e-4, 1e-4, 5e-4]. The batch size decreases to 64 for saving GPU memory. + +# 4.13. VPEG_O + +General Method Description. 
They introduce SAFMNv3, an enhanced version of SAFMN [96] for solving real-time image SR. This solution mainly concentrates on improving the effectiveness of the spatially-adaptive feature modulation (SAFM) [96] layer. Different from the original SAFMN, as shown in Fig 13, the simplified SAFM layer is able to extract both local and non-local features simultaneously without channel splitting. Within this module, they use two $3 \times 3$ convolutions to project the input and use variance-constrained feature modulation operator [144] in branches with fewer channels, and finally aggregate these two parts of the feature, then refine the aggregated features via a feed-forward neural network. + +Training Description. The proposed SAFMNv3 consists of 6 feature mixing modules, and the number of channels is set to 40. They train the network on RGB channels and augment the training data with random flipping and rotation. Following previous methods, the training process is divided into three stages: + +1. In the first stage, they randomly crop $256 \times 256$ HR image patches from the selected LSDIR [64] dataset, with a batch size of 64. The proposed SAFMNv3 is trained by minimizing L1 loss and the frequency loss[14] with Adam optimizer for a total of 800,000 iterations. The initial learning rate is set to 2e-3, with a Cosine Annealing scheme [78]. +2. In the second stage, they increase the size of the HR image patches to $384 \times 384$ . The model is fine-tuned on the DF2K [100] by minimizing Charbonnier loss function. + +The initial learning rate is set to 5e-4, and the total iterations is $500\mathrm{k}$ + +3. In the third stage, the batch size is set to 64, and PSNR loss is adopted to optimize over $300\mathrm{k}$ iterations. The initial learning rate is set to 5e-5. + +Throughout the training process, they also employ an Exponential Moving Average (EMA) strategy to enhance the robustness of training. + +# 4.14.mmSR + +Method. 
They improve the model based on SAFMN++ [91] and name it FAnet as shown in Fig. 14. Compared to SAFMN++, their model achieves a higher PSNR with a lower computational cost. Unlike the original SAFMN++ method, they introduce modifications in both the data and model structure. In terms of model structure, as shown in the figure, they improve the Feature Mixing Module of the original architecture and incorporate the concept of reparameterization, designing the RFMM. They modify the convolutional extraction network preceding the original module into a parallel structure to accommodate multi-granularity feature extraction and apply re-parameterization [23] during inference. Furthermore, they adjust the downsampling factor in SimpleSAFM to 16 to achieve lower computational complexity. Regarding the data, in addition to utilizing the provided training dataset, they analyze the superresolution results of the model and identify common issues in fine-detail generation. Given constraints on model parameters and computational resources, it is impractical for a lightweight model to generate details identical to the ground truth. Therefore, they shift their focus to expanding the training dataset. Specifically, they use 10,800 images from the training dataset as input and employ convolutional neural networks such as Omni-SR [113] to generate new images. This additional data is incorporated into the training process to facilitate learning and mitigate the risk of learning bias caused by excessive learning difficulty. + +Training Details. They train their model on the DIV2K [100], Flickr2K [70], and LSDIR [64] datasets. The cropped low-resolution (LR) image size is set to 64 × 64 and subjected to random flipping and rotation. The FAnet model is optimized using the Adam optimizer with L1 loss minimization in a multi-stage training scheme. 
During the training phase, they set the initial learning rate to $2 \times 10^{-3}$ and the minimum learning rate to $1 \times 10^{-6}$ , training for 500,000 iterations with a mini-batch size of 512. In finetuning stage, Initialized with training phase weights, they fine-tune the model with the given training dataset and additional dataset which is proposed as above. They finetune the model using a learning rate of $1 \times 10^{-4}$ and the minimum learning rate set to $1 \times 10^{-6}$ , with a mini-batch size of 64. + +![](images/8cf587e3cbc1927fbca4656b8736d9f84ac6f220c9227c1ad73401744af36b10.jpg) +Figure 13. Team VPEG_O: An overview of the proposed SAFMNv3. + +![](images/9bbcbdf88644d05c0209ff8adeee4dc89fcb240fb6ca41121a1750176f9fa5bd.jpg) +Figure 14. Team mmSR: The overall network architecture of FAnet. + +# 4.15. ChanSR + +General Method Description. They propose the Edge Enhanced Convolutional Network (EECNet) for the efficient super-resolution task. The network architecture is inspired by the design of SRN [118], while fully exploring the capacity of reparameterizable convolution. The whole architecture is shown in Fig. 15(a). They introduce a predefined High-Pass Filter (HPF) branch to explicitly capture edge details, formulated as: + +$$ +\mathbf {K} _ {h p f} = \frac {1}{1 6} \left[ \begin{array}{r r r} - 1 & - 2 & - 1 \\ - 2 & 1 2 & - 2 \\ - 1 & - 2 & - 1 \end{array} \right]. \tag {8} +$$ + +Then they integrate the proposed HPF into the EDBB [116], creating the subEEC module. As subEEC can be mathematically equivalent to a standard $3 \times 3$ convolution, they replace the original $3 \times 3$ convolution in RRRB [25] with our subEEC to obtain the final EEC architecture, whose structure is shown in Fig. 15(b). Notably, to ensure valid re-parameterization, they initialize the bias of the first convolution layer as zero to compensate for the zeropadding operation in subEEC. 
+ +To better capture global spatial information, they adopt the simplified Efficient Spatial Attention mechanism from + +SRN [118], whose structure is shown in Fig. 15(c). Compared with the original ESA, this implementation removes the $1 \times 1$ convolution layer and reduces computational complexity by employing only a single $3 \times 3$ convolution in the convolutional group. + +Training Description. The proposed EECNet contains eight EEBs, in which they set the number of feature maps to 32. Also, the channel number of the ESA is set to 16 similar to [56]. Throughout the entire training process, they use the Adam optimizer [54], where $\beta 1 = 0.9$ and $\beta 2 = 0.999$ . The model is trained for $1000k$ iterations in each stage. Input patches are randomly cropped and augmented. Data augmentation strategies included horizontal and vertical flips, and random rotations of 90, 180, and 270 degrees. Model training was performed using Pytorch 1.12.0 [85] on RTX 3090. Specifically, the training strategy consists of several steps as follows. + +1. In the starting stage, they train the model from scratch on the 800 images of DIV2K [4] and the first 10k images of LSDIR [64] datasets. The model is trained for a total $10^{6}$ iterations by minimizing L1 loss and FFT loss [15]. The HR patch size is set to $256 \times 256$ , while the mini-batch size is set to 64. They set the initial learning rate to $1 \times 10^{-3}$ and the minimum one to $1 \times 10^{-5}$ , which is updated by the Cosine Annealing scheme. +2. In the second stage, they increase the HR patch size to 384, while the mini-batch size is set to 32. The model is fine-tuned by minimizing the L1 loss and the FFT loss. They set the initial learning rate to $5 \times 10^{-4}$ and the minimum one to $1 \times 10^{-6}$ , which is updated by the Cosine Annealing scheme. +3. 
In the last stage, the model is fine-tuned with $480 \times 480$ HR patches, however, the loss function is changed to minimize the combination of L2 loss and FFT loss [15]. Other settings are the same as Stage 2. + +# 4.16. Pixel Alchemists + +Network Architecture. The overall architecture of team Pixel Alchemists is shown in Fig. 16. They propose a novel architecture named resolution-consistent UNet (RCUNet). The proposed network consists of four deep feature comple + +![](images/e9e1cda4695875e0b22a2ee15705c87b10f30d2b2a898093416b94ff467341a4.jpg) + +![](images/d48024e2b80356cc2faa2c14f1af7be0760ff89822241d890034e1dceb40e2d0.jpg) +Figure 15. Team ChanSR: Network architecture of the EECNet. + +ment and distillation blocks (DFCDB). Inspired by [35, 83], the input feature map is split along the channel dimension in each block. Then, four convolutional layers process one of the split feature maps to generate complementary features. The input features and complementary features are concatenated to avoid loss of input information and distilled by a conv-1 layer. Besides, the output feature map of DFCDB is further enhanced by the ESA layer [55]. + +Online Convolutional Re-parameterization. Reparameterization [136] has improved the performance of image restoration models without introducing any inference cost. However, the training cost is large because of complicated training-time blocks. To reduce the large extra training cost, online convolutional re-parameterization [41] is employed by converting the complex blocks into a single convolutional layer during the training stage. The architecture of RepConv is shown in Fig. 17. It can be converted to a $3 \times 3$ convolution during training, which saves the training cost. + +Training Details. The proposed RCUNet has four DFCDBs. The number of features is set to 48, and the number of ESA channels is set to 16. + +DIV2K [4] and LSDIR [64] datasets are used for training. The training details are as follows: + +1. 
The model is first trained from scratch with $256 \times 256$ patches randomly cropped from HR images from the DIV2K and LSDIR datasets. The mini-batch size is set to 64. The L1 loss and pyramid loss are minimized with the Adam optimizer. The initial learning rate is set to 1e-3 with a cosine annealing schedule. The total number of + +iterations is 1000k. + +2 Then the model is initialized with the pre-trained weights of Stage 1. The MSE loss and pyramid loss is used for fine-tuning with $512 \times 512$ HR patches and a learning rate of 1e-5 for 500k iterations. + +![](images/aeb111111f6a66fd1e33711c57d02e8b37f987757aecc478ba83f7f117f8f563.jpg) +Figure 16. Team Pixel Alchemists: RCUNet Architecture. + +# 4.17.LZ + +General Method Description. To enhance model complexity without increasing computational overhead, they focus on designing structurally simple yet expressively powerful components, notably through re-parameterization techniques. Drawing inspiration from ECBSR [137], + +![](images/e2f754ff416b95f767e85bf2241846e8853135980f1a33ca98e7b3b2dd78f4f5.jpg) +(a) Online Reparameterization + +![](images/65bfab3183f17ccd152cdcd70375e893a18feaf358a53ada5a84bdc2975a7327.jpg) +Figure 17. Team Pixel Alchemists: Online re-parameterization. +Figure 18. Team LZ: Detailed architecture of TDESR. + +their TDESR framework strategically implements reparameterization to improve super-resolution performance while preserving training efficiency. Following the reparameterization phase, they employ tensor decomposition for light-weight network design, where standard $3 \times 3$ convolutions are factorized into sequential $3 \times 1$ and $1 \times 3$ convolutional operations. + +As illustrated in Fig. 18, their architecture comprises five TD Blocks interspersed with three standard $3 \times 3$ convolutions, implementing a skip connection through elementwise addition between the input features (processed by a $3 \times 3$ convolution) and intermediate feature maps. 
The network maintains 64 channels throughout, with tensor decomposition intermediate channels reduced to 32 for computational efficiency. They integrate insights from Swift-SR's parameter-free attention mechanism [112] to enhance feature representation. The final reconstruction stage employs PixelShuffle with 48 input channels for high-quality image upsampling, completing their balanced design of performance and efficiency. + +Training Details. The training details of team LZ are as follows. + +- Base Training ( $\times 2$ upscaling) The model is initially trained for $\times 2$ super-resolution using randomly cropped $96 \times 96$ HR patches with a batch size of 32. They employ + +![](images/449a23221e1f675fa538f3ab016b13b78bd4d647f4c1a5ea675c158ab5a86d85.jpg) +Figure 19. Team Z6: Network architecture of GloReNet. + +the Adam optimizer to minimize the L1 loss, starting with an initial learning rate of $1 \times 10^{-4}$ that decays via Multi-StepLR scheduler at the mid-training point. The training completes over 100 epochs, utilizing re-parameterization techniques throughout the process. + +- Enhanced Resolution Training. Building upon the $\times 2$ pretrained weights, this phase increases the HR patch size to $128 \times 128$ while reducing the batch size to 16. All other hyperparameters (optimizer, learning rate schedule, and re-parameterization) remain consistent with Stage 1. The continued use of L1 loss maintains training stability during this resolution scaling phase. +- Convolutional Architecture Refinement. They implement standard $3 \times 3$ convolutional layers in this optimization stage, replacing previous architectural components. The training objective shifts to L2 loss minimization for fine-tuning, while preserving the fundamental network structure and parameter initialization from earlier stages. This transition enhances edge preservation in super-resolved outputs. +- Tensor Decomposition Optimization. 
The final refinement employs tensor decomposition techniques with dual loss supervision $(\mathrm{L1} + \mathrm{L2})$ . Training progresses with $256 \times 256$ HR patches using a reduced batch size of 16 and lower initial learning rate $(1 \times 10^{-5})$ . They implement cosine annealing scheduling for smooth convergence, completing the multi-stage optimization process through L2-loss-focused fine-tuning. + +# 4.18.Z6 + +General Method Description. They introduce a lightweight and efficient image super-resolution (SR) network that leverages both global and local feature attention mechanisms to produce high-quality reconstructions. As depicted in Fig. 19, their network is divided into two main blocks named Global Feature Attention Block (GFAB) and Local Feature Attention Block (LFAB). + +GFAB is designed to capture large-scale context and dependencies across the entire image. It enhances globally significant features, helping the model learn the global information from input images. LFAB can focus on refining fine-grained details and spatially localized information. It emphasizes subtle textural elements and sharp edges that are critical for upscaling. GFAB utilizes the parameter-free attention module (SPAN [111]) and LFAB uses Effi
In the first scratch training stage, they use DIV2K datasets for the training dataset. In the fine-tuning stage, they use DIV2K and the first 10K LSDIR datasets for the training dataset. All experiments are carried out in the same experimental environment. The training process is executed using RTX A6000 GPUs. They use the Pytorch 1.13 version for all training steps. + +- Scratch train stage: In the first step, their model is trained from scratch. The LR patches were cropped from LR images with an 8 mini-batch of $256 \times 256$ . Adam optimizer is used with a learning rate of 0.0005 during scratch training. The cosine warm-up scheduler is used. The total number of epochs is set to 2000. They use the $l1$ loss. +- Fine-tuning stage: In the second step, the model is initialized with the weights trained in the first step. To improve precision, they used the loss method $l2$ loss. This stage improves the value of the peak signal-to-noise ratio (PSNR) by $0.05 \sim 0.06$ dB. In this step, The LR patches are cropped from LR images with 32 mini-batch $512 \times 512$ sizes. And the initial learning rate is set to 0.00005 and the Adam optimizer is used in conjunction with a cosine warm-up. The total epoch is set to 200 epochs. + +# 4.19. TACO_SR + +General Method Description. The overall architecture of their network is showed in Fig. 20(a), inspired by SPAN [110] and PFDNLite [91]. Motivated by the design of the Conv3XC module in SPAN, they introduce two additional parallel branches with varying channel expansion ratios, resulting in a novel convolution module termed TenInOneConv, which fuses multiple convolution kernels into a single equivalent kernel to improve inference efficiency. Furthermore, to enhance the model's capability in capturing local texture and detail features, the LocalAttention module, inspired by PFDNLite is integrated, allowing the network to better focus on informative regions within feature maps. + +TenInOneSR employs four TenInOneBlock modules. 
Each of these blocks (detailed in Fig. 20(b)) begins with a LocalAttention module, which enhances the network's ability to capture fine details. Subsequently, each block ap
In the second stage, they keep the training strategy and hyperparameters unchanged, except for increasing the input patch size to $384 \times 384$ and reducing the batch size to 32 to fit GPU memory. Then another 100,000 training iterations are conducted to further improve the model's performance on higher-resolution textures. + +# 4.20.AIOT.AI + +Method. The overall architecture of their network is shown in Fig. 21(a), inspired by the previous leading methods SPAN[112] and ECBSR[138]. They propose an Efficient channel attention super-resolution network acting on space (ECASNet). Specifically, on the basis of SPAB from SPAN, they combine edge-oriented convolution block (ECB) and regularization module (GCT) to form a new reparameterized feature extraction module named enhanced attention and re-parameterization block(EARB), as shown in Fig. 21(b). In addition, unlike SPAN, they find that using channel attention after feature map concatenating can significantly improve performance. For the sake of lightweight design, they use an efficient channel attention + +![](images/06848c39c978127dbf1a5777572509c2538e8cda239227794323ca26f32c9d74.jpg) +Figure 20. Team TACO_SR: The architecture of proposed TenInOneSR. + +![](images/5aeaf5ee1c5610a62b97273c69623209ee7d7db802eb52f815951d41d89ec85f.jpg) + +![](images/b2dafe71e9e3063b6f4e6e0d7fe0c81118d087eca6c5fbfdf98428625d1d76de.jpg) + +![](images/acb7e256b36e27fbb9227c2c97f2747745796ad01d4a69839743b8c4c6ab22db.jpg) + +![](images/49df396ea37e71a78da20d8f92ed483037fdc7b9986cdfebb18dda7676b67431.jpg) + +module, called the efficient channel attention module which acts on space(CAS), as shown in Fig. 21(c). + +Training Detail. The datasets used for training include DIV2K and LSDIR. Imitating the previous method, the training process is divided into two stages. In the first stage, they randomly crop $256 \times 256$ HR image blocks from the ground truth image, batch is 16, and randomly flipped and rotated them. 
Using Adam optimizer, set $\beta 1 = 0.9$ and $\beta 2 = 0.999$ , and minimize L1 loss function. The initial learning rate is set to 5e-4, and the cosine learning rate attenuation strategy is adopted. Epoch is set to 200. In the second stage, they changed the loss function to L2, and other settings are the same as those in the first stage. + +# 4.21.JNU620 + +General Method Description. They propose a reparameterized residual local feature network (RepRLFN) for efficient image super-resolution, which is influenced by existing studies such as RepRFN [19] and RLFN [55]. Fig. 22 + +illustrates the overall architecture of RepRLFN, which has been extensively validated in previous studies. + +They replace the RLFB in RLFN [55] with their reparameterized residual local feature block (RepRLFB). RepBlock is the main component of RepRLFB, which employs multiple parallel branch structures to extract the features of different receptive fields and modes to improve performance. At the same time, the structural re-parameterization technology is leveraged to decouple the training and inference phases to avoid the problem that computational complexity increases caused by the introduction of multi-branch. + +Training Strategy. The proposed RepRLFN consists of 4 RepRLFBs, with the number of feature channels set to 48. The details of the training steps are as follows: + +1. In the first stage, the model is pre-trained on DIV2K [4]. HR patches of size $480 \times 480$ are randomly cropped from HR images, and the mini-batch size is set to 32. The model is trained by minimizing the L1 loss function + +![](images/cd71cb66b2605332b1a6f6ce4dc15f144a84d5fa912a642fd7c10063bb2be48b.jpg) +(b) ECASNet + +![](images/d5280ba3c0c422f04fb814ae78615d895235df2419f06168de54ab34712b08dd.jpg) +(b) EARB + +![](images/f9533c63c96b88d6982d85f2095e9f195b9d8592e0275d4fb83d1ad4cc7289c3.jpg) +(c) CAS +Figure 21. Team AIOT.AI: Detailed architecture of the proposed ECASNet. 
+ +![](images/8bd2ef8051dea56cf5a345b62b7708f0cb0526c294db8e195419e3b299cee319.jpg) +(d) RepConv + +using the Adam optimizer. The initial learning rate is set to 5e-4 and is halved every 200 epochs. The total number of epochs is 800. + +2. In the second stage, the model is fine-tuned on 3450 images from DIV2K [4] and Flickr2k [101] (DF2K) and the first 10k images from LSDIR [64]. HR patches of size $640 \times 640$ are randomly cropped from HR images, and the mini-batch size is set to 32. The model is fine-tuned by minimizing the L2 loss function. The initial learning rate is set to 2e-4 and is halved every 5 epochs. The total number of epochs is 25. +3. In the third stage, the model is fine-tuned again on 3450 images from DF2K and the first 10k images from LSDIR [64]. The HR patch size and minibatch size are set to $640 \times 640$ and 32, respectively. The model is fine-tuned by minimizing the L2 loss function. The initial learning rate is set to 1e-4 and is halved every 5 epochs. The total number of epochs is 20. +4. In the fourth stage, the model is fine-tuned on 3450 images from DF2K and the first $10\mathrm{k}$ images from LSDIR [64]. The HR patch size and minibatch size are set + +to $640 \times 640$ and 32, respectively. The model is fine-tuned by minimizing the L2 loss function. The learning rate is set to 5e-5, and the total number of epochs is 10. To prevent over-fitting, the model ensemble via stochastic weight averaging [46] (SWA) is performed during the last 8 epochs to obtain the final model for testing. + +# 4.22. LVGroup_HFUT + +General Method Description. The Swift Parameter-free Attention Network (SPAN) [112] introduces a novel parameter-free attention mechanism to address the tradeoff between performance and computational complexity, as shown in 23. 
SPAN employs symmetric activation functions (e.g., shifted Sigmoid) applied to convolutional layer outputs to generate attention maps without learnable parameters, enhancing high-contribution features while suppressing redundant information. Residual connections within each Swift Parameter-free Attention Block (SPAB) mitigate information loss and preserve low-level features. The lightweight architecture with cascaded SPABs achieves fast inference by avoiding parameter-heavy attention computations while maintaining reconstruction quality through hierarchical feature aggregation and pixel-shuffle upsampling.

![](images/dfc46883933c059577cb6e6eeaa96eebd866af9c6728c75dfb5d979abc1dad54.jpg)
Figure 22. Team JNU620: The network architecture of RepRLFN

![](images/de492b7ced705d8f7a88d48385420eb72c09a6323ea94714fef518a990277b96.jpg)
Figure 23. LVGroup_HFUT: The overall framework of SPAN.

Training Details. They trained the SPAN model [112] on a mixed dataset composed of DIV2K [104] and LSDIR [64], setting feature_channels to 48, where the crop size of images is $256 \times 256$ . They used the Adam optimizer with L1 loss, an initial learning rate of 5e-4, and trained for a total of 1000k iterations, halving the learning rate every 200k iterations. Training was completed using a single NVIDIA RTX 4090 GPU.

![](images/f9f256e2bdc25f83ac89a801417e8634751d782b6c46300dbea253a6df644900.jpg)
Figure 24. Team YG: The Spatial-gate self-distillation network (SGSDN).

# 4.23. YG

# 4.23.1. Method Details.

The primary idea of the proposed SGSDN is to explore nonlocal information in a SA-like manner while modeling local details for efficient image super-resolution. This section will start by introducing the overall architecture of SGSDN and then explain the SGM and ESD in detail.

Network Architecture The overall structure of the SGSDN is shown in Fig. 24. It consists of three stages: shallow feature extraction, deep feature extraction, and image reconstruction.
First, they use a $3 \times 3$ convolutional layer to extract shallow features, which is expressed as: + +$$ +\mathbf {I} _ {s} = F _ {\text {C o n v 3} \times 3} (\mathbf {I} _ {L R}), \tag {9} +$$ + +where, $F_{Conv3 \times 3}$ represents the shallow feature extraction module using a $3 \times 3$ convolutional layer. The obtained shallow feature is denoted as $\mathbf{I}_s$ . Subsequently, the extracted + +![](images/cba831f90e681281e70d07836d242412ebf29f6e98714494d5a04829c493e39c.jpg) +Figure 25. Team YG: The details of each component. (a) SGM: Spatial-gate modulation module; (b) ESD: Enhanced self-distillation module. + +shallow features are fed to several stacked SGSDBs to produce deep representative features, This process can be expressed as: + +$$ +\mathbf {I} _ {k} = F _ {S G S D B} ^ {k} \left(\mathbf {I} _ {k - 1}\right), k = 1, \dots , n, \tag {10} +$$ + +where, $F_{SGSDB}^{k}(\cdot)$ represents the $k$ -th SGSDB, $\mathbf{I}_{k-1}$ and $\mathbf{I}_k$ denote the input and output features of the $k$ -th SGSDB, respectively. Each SGSDB consists of three SGMs and an ESD. Given an input feature $\mathbf{I}_t$ , the mapping process of SGSDB can be represented as: + +$$ +\begin{array}{l} \mathbf {I} _ {d _ {1}} = F _ {S G M} (\mathbf {I} _ {t}), \\ \mathbf {I} _ {d _ {2}} = F _ {S G M} (\mathbf {I} _ {d _ {1}}), \\ \mathbf {I} _ {d _ {3}} = F _ {S G M} \left(\mathbf {I} _ {d _ {2}}\right) + \mathbf {I} _ {t}, \\ \mathbf {I} _ {o} = F _ {E S D} (\mathbf {I} _ {d _ {3}}) + \mathbf {I} _ {d _ {3}} \\ \end{array} +$$ + +where, $F_{SGM}$ represents the SGM, $F_{ESD}$ represents the ESD. After the deep feature extraction block, the representative features are processed by a $3 \times 3$ standard convolution layer and a pixel shuffle operation [94] to reconstruct the high-quality SR image. To take advantage of high-frequency information, they insert a long-distance residual connection before the image reconstruction module. 
The reconstruction stage is described as follows + +$$ +\mathbf {I} _ {S R} = F _ {\text {P i x e l S h u f f l e}} \left(F _ {\text {C o n v 3} \times 3} \left(\mathbf {I} _ {d} + \mathbf {I} _ {s}\right)\right), \tag {12} +$$ + +where $\mathbf{I}_d$ denotes the deep feature obtained by the stacked SGSDBs, and $F_{Conv3\times 3}(\cdot)$ indicates the $3\times 3$ standard convolution layer. $F_{PixelShuffle}(\cdot)$ is used to upscale the final feature and output the SR reconstructed image $\mathbf{I}_{SR}$ . + +Finally, to train the network, they use the $L_{1}$ loss function to minimize the pixel-level difference between the ground truth image $\mathbf{I}_{GT}$ and the reconstructed image $\mathbf{I}_{SR}$ , which can be expressed as: + +$$ +L _ {1} = \left\| \mathbf {I} _ {S R} - \mathbf {I} _ {G T} \right\| _ {1}, \tag {13} +$$ + +At the same time, they notice that only using the pixelwise loss function can not effectively generate more high-frequency details [15]. Thus, they accordingly employ a + +frequency constraint to regularize network training. The adopted loss function for the network training is defined as + +$$ +L = L _ {1} + \lambda \| \mathcal {F} (\mathbf {I} _ {S R}) - \mathcal {F} (\mathbf {I} _ {G T}) \|. \tag {14} +$$ + +where $\mathcal{F}$ represents the Fast Fourier Transform, and $\lambda$ is a weight parameter which is empirically set to 0.1. + +Spatial-gate modulation module Considering that the reason why the ViT-based model performs well is that SA explores non-local information and expands the effective receptive field of the model. They develop a lightweight spatial-gate modulation (SGM) module to collaboratively extract representative features, where the SAL branch exploits non-local features in a larger receptive field by integrating the dilated depth-wise convolutional layers with horizontal and vertical 1-D kernels, and the LKG branch captures local features in parallel. 
Moreover, to avoid potential block artifacts aroused by dilation, they adopt the gate mechanism to recalibrate the generated feature maps adaptively, as shown in Fig. 25. + +Given the input feature $\mathbf{I}_{in} \in R^{C \times H \times W}$ , where $H \times W$ denotes the spatial size and $C$ is the number of channels. Specifically, they first apply a normalization layer and a point-by-point convolution to normalize information and expand the channel. + +$$ +\mathbf {I} _ {1} = F _ {\text {C o n v 1} \times 1} \left(F _ {\text {N o r m}} \left(\mathbf {I} _ {\text {i n}}\right)\right), \tag {15} +$$ + +where, $F_{Norm}$ represents the $L_2$ normalization and $F_{Conv1\times 1}$ denotes a $1\times 1$ convolutional layer, $\mathbf{I}_1\in R^{2C\times H\times W}$ . Subsequently, the obtained features $\mathbf{I}_1$ are split into two parts along the channel dimension, this process can be expressed as: + +$$ +\mathbf {I} _ {x}, \mathbf {I} _ {y} = F _ {S} \left(F _ {G} \left(\mathbf {I} _ {1}\right)\right), \tag {16} +$$ + +where $F_{G}$ denotes the GELU activation function [38], $F_{S}$ denotes a channel splitting operation, $\mathbf{I}_x \in R^{C \times H \times W}$ and $\mathbf{I}_y \in R^{C \times H \times W}$ . They then process the features $\mathbf{I}_x$ and $\mathbf{I}_y$ in parallel via the SAL and LKG branches, producing the non-local feature $\mathbf{I}_n$ and local feature $\mathbf{I}_l$ , respectively. It is worth mentioning that the SAL and LKG branches only need to be responsible for half the input signals, and the parallel processing is faster. Finally, they fuse the non-local feature $\mathbf{I}_n$ and local feature $\mathbf{I}_l$ together with channel concatenation to form a representative output of the SGM module. 
This process can be expressed as,

$$
\mathbf{I}_{SGM} = F_{C}\left(\mathbf{I}_{n}, \mathbf{I}_{l}\right), \tag{17}
$$

where, $\mathbf{I}_{SGM}$ is the output feature and $F_{C}(\cdot)$ is the channel cascade operation.

SA-like branch They exploit non-local features in a larger receptive field by integrating the dilated depth-wise convolutional layers with horizontal and vertical 1-D kernels.

$$
\mathbf{I}_{o} = F_{D^{3}WConv11 \times 1}\left(F_{DWConv5 \times 1}\left(F_{D^{3}WConv1 \times 11}\left(F_{DWConv1 \times 5}(\mathbf{I}_{m})\right)\right)\right) \tag{18}
$$

where $F_{DWConv1 \times 5}(\cdot)$ denotes the DWConv layer with a kernel of size $1 \times 5$ , $F_{D^3 WConv1 \times 11}(\cdot)$ signifies the DWConv layer with a kernel of size $1 \times 11$ and the dilated factor is set to 3, $F_{DWConv5 \times 1}(\cdot)$ denotes the DWConv layer with a kernel of size $5 \times 1$ , $F_{D^3 WConv11 \times 1}(\cdot)$ signifies the DWConv layer with a kernel of size $11 \times 1$ and the dilated factor is set to 3. Given that increasing the convolution kernel directly will greatly increase the parameter and computation amount, as well as increase the inference time of the model, whereas utilizing the dilated depth-wise convolutional layers with horizontal and vertical 1-D kernels will alleviate the problem. In this way, the information extraction capability of the convolutional layer is further enhanced without greatly increasing the number of computations. Moreover, to avoid potential block artifacts arising from dilation, they adopt the gate mechanism to recalibrate the generated feature maps adaptively. Finally, they use a $1 \times 1$ convolution to distill the output feature for extracting the representative structure information $\mathbf{I}_n$ .
+ +$$ +\mathbf {I} _ {n} = F _ {\text {C o n v 1} \times 1} \left(\mathbf {I} _ {o} * \mathbf {I} _ {y}\right) \tag {19} +$$ + +where $*$ represents the element-wise product operation. + +Local spatial-gate branch Local details are important for the pleasing high-frequency reconstruction. As the SAL branch prioritizes non-local structure information exploration, they develop a simple local spatial-gate branch to capture local features simultaneously. In detail, a $3 \times 3$ depth-wise convolution is used to encode local information from the input features $\mathbf{I}_x$ . Then, they use the gate mechanism to generate the enhanced local feature. Finally, they use a $1 \times 1$ convolution with a GELU activation to distill the output features for extracting the representative detail information $\mathbf{I}_l$ , which is achieved by, + +$$ +\begin{array}{l} \mathbf {I} _ {o} = F _ {D W C o n v 3 \times 3} (\mathbf {I} _ {x}) * \mathbf {I} _ {y}, \\ \mathbf {I} _ {o} = F _ {D W C o n v 3 \times 3} (\mathbf {I} _ {x}). \end{array} \tag {20} +$$ + +$$ +\mathbf {I} _ {l} = F _ {G} \left(F _ {\text {C o n v 1} \times 1} \left(\mathbf {I} _ {o}\right)\right) +$$ + +where $F_{DWConv3 \times 3}(\cdot)$ denotes the DWConv layer with a kernel of size $3 \times 3$ , $F_{G}$ represents GELU activation function. + +Enhanced self-distillation module They present an enhanced self-distillation (ESD) module to expand and refine the features derived from the SGM in spatial and channel dimensions further. The ESD uses a $3 \times 3$ depth-wise convolutional to expand spatial and channel information. Then they use the GLUE activation function to introduce nonlinearity and extend the representation of the network. Finally, the output features are fed into a $1 \times 1$ convolution for further feature mixing and reducing the hidden channel back to the original input dimension. 
Given the input feature $\mathbf{I}_{in} \in R^{C \times H \times W}$ , this process can be formulated as, + +$$ +\mathbf {I} _ {l} = F _ {\text {C o n v 1} \times 1} \left(F _ {G} \left(F _ {\text {D W C o n v 3} \times 3} \left(\mathbf {I} _ {i n}\right)\right)\right) \tag {21} +$$ + +Training Details. Following previous works [66], they use the DF2K dataset, which consists of 800 images from DIV2K [4] and 2650 images from Flickr2K [70] as the training dataset. A sliding window slicing operation is used to decompose each HR image into $480 \times 480$ patches for training. The LR images are obtained by downsampling the HR images using the MATLAB bicubic kernel function. + +During the training, random rotation and horizontal flipping are used for data augmentation. The proposed SGSDN has 8 SGSDBs, in which the number of feature channels is set to 24. They start by pretraining the model on the DIV2K and Flickr2K datasets. The mini-batch size is set to 64. The model is trained by the ADAN optimizer [124] with $\beta_{1} = 0.98$ , $\beta_{2} = 0.92$ and $\beta_{3} = 0.99$ , and the exponential moving average (EMA) is set to 0.999 to stabilize training. The initial and minimum learning rates are set to $5 \times 10^{-3}$ and $1 \times 10^{-6}$ , respectively, and decay according to cosine learning rate. The model is optimized using a combination of the $L_{1}$ loss and an FFT-based frequency loss function [15] for a total of $1 \times 10^{6}$ iterations. The size of the randomly cropped LR patches is $64 \times 64$ . + +They then conduct fine-tuning on the DIV2K dataset and the first 10k images from LSDIR [64]. The input size is set to $96 \times 96$ , with a batch size of 32. The fine-tuning process optimizes the model by starting with an initial learning rate of $3 \times 10^{-3}$ , while keeping the rest consistent with pretraining. The fine-tuning phase encompasses a total of 100k iterations. They implemented our model on an NVIDIA RTX 3090 GPU using Pytorch. 
# 4.24. NanoSR

Network Architecture. Their network architecture is inspired by SPAN [112] and PAN [142]. While maintaining the overall design of SPAN, they replace the SPAB block with the RepBlock. The RepBlock consists of a feature extractor using reparameterized convolution and a reparameterized pixel attention module. During training, the RepBlock operates in a complex mode to achieve better quality performance but can be equivalently transformed into a simple mode with fewer parameters and FLOPs. The detailed network architecture is illustrated in Fig. 26.

Reparameterized Convolution. Reparameterized convolution plays a crucial role in improving the performance of efficient CNN-based super-resolution networks. They employ the RepMBConv introduced in PlainUSR [120], and this RepMBConv forms all the convolutions in the RepBlock. In addition, RepMBConv is derived from MobileNetV3 [39] Block (MBConv). The architecture of RepMBConv is depicted in Fig. 27.

Implementation Details. They train the model using all 85,791 image pairs from the DIV2K and LSDIR datasets. Each image pair is cropped into $480 \times 480$ sub-patches for training. During each training batch, 64 HR RGB patches of size $128 \times 128$ are randomly cropped and augmented with random flipping and rotation. The optimization objective is the $\ell_1$ loss, and they use the AdamW optimizer ( $\beta_{1} = 0.9$ , $\beta_{2} = 0.99$ ) to train NanoSR. The learning rate is initialized at $5 \times 10^{-4}$ and halved at $\{250\mathrm{k}, 400\mathrm{k}, 450\mathrm{k}, 475\mathrm{k}\}$ iterations within a total of $500\mathrm{k}$ iterations.

![](images/4b7b31467018d189fc76958a39af5df580c215fa4fa8a574934ee8b7d2b699dd.jpg)
Figure 26. Team NanoSR: The network architecture of NanoSR

![](images/f41c0d497ea15477ece5f1fc75a7c7d46d314cd74ff65e50caa4891ff1ad9ef1.jpg)
Figure 27. Team NanoSR: The architecture of the RepMBConv block
The proposed method is implemented using the PyTorch framework on a single NVIDIA RTX 4090 GPU.

# 4.25. MegastudyEdu_Vision.AI

General Method Description. To effectively model long-range dependency and extensive receptive field, inspired by CFSR [122], they propose the multi-scale aggregation attention network (MAAN), as illustrated in Fig. 28. MAAN reconstructs high-quality images through a shallow feature extractor, a stack of three residual multi-scale aggregation blocks (RMAB) composed of multi-scale aggregation attention layers (MAAL), a large separable kernel attention tail (LSKAT), and an image reconstruction module. Specifically, MAAL captures global and local details via a multi-scale mixer and efficient feed-forward network (EFN) [122]. Given a low-resolution input image $I_{LR} \in \mathbb{R}^{3 \times H \times W}$ , shallow features such as edges, textures, and fine details are extracted using a $3 \times 3$ convolution in the shallow feature extraction stage and passed to the MAAL. As shown in Fig. 28, the MAAL processing pipeline begins with an input $X$ , applying layer normalization, followed by a $1 \times 1$ convolution and splitting the feature map into four groups along the channel dimension:

![](images/77c96b623860559eacc549d8e973b55a52ac782e82292d36ce71b6afab9761ca.jpg)
Figure 28. Team MegastudyEdu_Vision.AI: Overview of multi-scale aggregation attention network.

$$
V = \operatorname{Conv}_{1 \times 1}(X),
$$

$$
F_{gate} = \operatorname{Conv}_{1 \times 1}(X), \tag{22}
$$

$$
F_{id}, F_{gate1}, F_{gate2}, F_{gate3} = \operatorname{Split}(F_{gate}) = F_{:g}, F_{g:2g}, F_{2g:3g}, F_{3g:}.
$$

Here, $F_{id}$ is the identity mapping without channel modification. The channel count used in convolution branches, denoted as $g$ , is determined by a ratio $r_g$ , computed as $g = r_g C$ . They set $r_g$ to 0.25.
Subsequently, each branch is processed using large separable kernel (LSK), inspired by large separable kernel attention (LSKA) [57]: + +$$ +F _ {i d} ^ {\prime} = F _ {i d}, +$$ + +$$ +\begin{array}{c} F _ {g a t e 1} ^ {\prime} = L S K _ {1 1, 2} \left(F _ {g a t e 1}\right), \\ \end{array} \tag {23} +$$ + +$$ +F _ {g a t e 2} ^ {\prime} = L S K _ {2 3, 3} \left(F _ {g a t e 2}\right), +$$ + +$$ +F _ {g a t e 3} ^ {\prime} = L S K _ {3 5, 3} \left(F _ {g a t e 3}\right), +$$ + +where $LSK_{k,d}$ indicates the kernel size $k$ and dilation factor $d$ . Each LSK is composed of consecutive $1 \times k$ depth-wise convolution, $k \times 1$ depth-wise convolution, $1 \times k$ dilated depth-wise convolution, and $k \times 1$ dilated depth-wise convolution. The distinct kernel sizes and dilation factors across branches effectively handle multi-scale features. + +After concatenating the outputs from each branch, the combined result is integrated with $V$ through an element-wise product. Subsequently, $1 \times 1$ convolution is applied to obtain the final output as follows: + +$$ +F _ {o u t} = \operatorname {C o n v} _ {1 \times 1} \left(V \odot \operatorname {C o n c a t} \left(F _ {i d} ^ {\prime}, F _ {\text {g a t e} 1} ^ {\prime}, F _ {\text {g a t e} 2} ^ {\prime}, F _ {\text {g a t e} 3} ^ {\prime}\right)\right) \tag {24} +$$ + +This $F_{out}$ is then fed into EFN [122]. For further EFN details, refer to CFSR [122]. + +While CFSR [122] employs a $3 \times 3$ convolution tail for deep feature extraction, it has limitations in establishing long-range connections, restricting the representational capability of reconstructed features. To overcome this, they propose LSKAT inspired by the large kernel attention tail(LKAT) [119], as depicted in Fig. 28. + +Training Details. Their approach leverages DIV2K[103], Flickr2K[70], and the first 10K portion of LSDIR[64]. In each RMAB, the number of channels, RMABs, and MAALs are set to 48, 3, and 2-3-2, respectively. 
During training, they used 256 HR RGB patches with a batch size of 64. Data augmentation included random flips and rotations. Parameters are optimized using the L1 loss and the Adam optimizer[54]. The learning rate started at $1 \times 10^{-3}$ and decreasing to $1 \times 10^{-6}$ using a cosine annealing scheduler. The network is trained for 1,000K iterations, implemented in PyTorch, and executed on an NVIDIA RTX 3090 GPU. + +# 4.26.MILA + +General Method Description. As shown in Figure 29, inspired by the efficient approximation of self-attention (EASA) [144], they introduce local variance and design LVSA. Additionally, inspired by MDRN [81] and AGDN [114], they consider the impact of multi-level branches on performance. Therefore, they design a multi-level variance feature modulation block that incorporates non-local information with local variance perception at two different levels. This design aims to better leverage the interplay between local and non-local features while balancing performance and model complexity. + +The gated-dconv feed-forward network (GDFN) [132] introduces gating mechanism and depth-wise convolutions to encode information from spatially adjacent pixel posi + +tions, which is highly useful for learning local image structures to achieve effective restoration. However, the single gating structure is relatively simple and cannot effectively capture and blend local contextual information. Therefore, they propose the symmetric gated feed-forward network. + +Training Description. The proposed MVFMNet has 6 FMMs, in which the number of feature channels is set to 26. The details of the training steps are as follows: + +1. Pretraining on the DF2K and the first 1k images of LSDIR datasets. HR patches of size $256 \times 256$ are randomly cropped from HR images, and the mini-batch size is set to 64. The model is trained by minimizing L1 loss and the frequency loss [14] with Adam optimizer for total 100k iterations. 
They set the initial learning rate to $1 \times 10^{-3}$ and the minimum one to $1 \times 10^{-6}$ , which is updated by the Cosine Annealing scheme [78]. +2. Finetuning on the DF2K and the first 1k images of LSDIR datasets. HR patch size and mini-batch size are set to $256 \times 256$ and 64, respectively. The model is fine-tuned by minimizing the L2 loss function. The learning rate is initialized at $2 \times 10^{-5}$ and gradually decreased to $1 \times 10^{-8}$ over 500k iterations using the Cosine Annealing scheme. + +# 4.27. AiMF_SR + +Method Details. They propose a novel Mixture of Efficient Attention (MoEA) architecture for efficient superresolution tasks. The architecture includes a shallow feature extractor, multiple Feature Representation Modules (FRMs), and an efficient reconstruction and upsampling module. Initially, a shallow $3 \times 3$ convolutional layer reduces computational load, generating compact feature representations. Deep feature extraction employs transformer-inspired blocks with pre-normalization, incorporating Mixture-of-Experts (MoE) Blocks [131] for efficient attention and Depth Feed Forward Networks (DepthFFN) for capturing depth-wise interactions. Details of the architecture can be seen in Fig. 30. + +The MoEBlock consists of two parallel feature pathways (Fig. 30). The input features $x$ are first projected into two distinct feature sets $x_{a}$ and $x_{b}$ using a pointwise convolution. The first branch, $x_{a}$ , undergoes both adaptive average and max pooling followed by depth-wise convolutions. The pooling is done in scale of 8 [145]. These pooling layers followed by depth-wise convolutions serve as efficient attention-like mechanism. Then, it combines these features through element-wise addition, nonlinear activation (GELU), and interpolation. The second branch, $x_{b}$ , is processed via depth-wise and pointwise convolutions with GELU activation. 
+ +$$ +\begin{array}{l} x _ {a} = \operatorname {D W C o n v} \left(\operatorname {A v g P o o l} \left(x _ {a}\right)\right) + \operatorname {D W C o n v} \left(\operatorname {M a x P o o l} \left(x _ {a}\right)\right), \\ x _ {a} ^ {\prime} = \mathcal {U} (\mathcal {G} (\operatorname {P W C o n v} (x _ {a}))), \\ x _ {a} ^ {\prime} = \operatorname {P W C o n v} \left(x _ {a} ^ {\prime}\right), \\ x _ {b} ^ {\prime} = \mathcal {G} (\operatorname {P W C o n v} (\operatorname {D W C o n v} (x _ {b}))), \\ x _ {a b} = \mathcal {C} \left(x _ {a} ^ {\prime}, x _ {b} ^ {\prime}\right). \tag {25} \\ \end{array} +$$ + +where $x_{a}, x_{b}$ are concatenated and passed through the Router (gating network), $\mathcal{R}$ , which adaptively selects the top- $k$ expert paths based on the channel-wise global average-pooled features in the MoE-layer. Each selected expert independently processes $x_{a}'$ and $x_{b}'$ through pointwise convolutions, multiplies them element-wise, and applies a final convolution for feature integration: + +$$ +\begin{array}{l} \operatorname {l o g i t s} = \mathcal {R} (x _ {a b}), \\ x _ {a} ^ {\prime}, x _ {b} ^ {\prime} = \operatorname {T o p K} (\operatorname {S o f t m a x} (\log_ {i} i)) \\ \operatorname {E x p e r t} \left(x _ {a} ^ {\prime}, x _ {b} ^ {\prime}\right) = \operatorname {P W C o n v} \left[ \operatorname {P W C o n v} \left(x _ {a} ^ {\prime}\right) \times \operatorname {P W C o n v} \left(x _ {b} ^ {\prime}\right) \right] \tag {26} \\ \end{array} +$$ + +Multiple FRMs (LayerNorm-MoEBlock-LayerNorm-DepthFFN sequences) are stacked for deep feature extraction. For reconstruction, global contextual features from deep extraction combine with shallow features via residual connections, followed by PixelShuffle-based upsampling to produce high-resolution outputs. The model uses GELU activation, Layer Normalization. 
Their MoE layer dynamically routes features across numExperts $= 3$ , selecting the top $k = 1$ experts at training time, allowing a flexible and adaptive processing pipeline tailored specifically to input feature characteristics. + +Training Strategy. The model is trained and tested on BasicSR [115] setting. First, the model is initially trained on DIV2K_LSDIR_x2, then further finetuned with DIV2K_LSDIR_x3 dataset for 500,000 iterations respectively, in which these scales are made with bicubic downsampling. The x4 scale model is finetuned on top of the x3 model over 500,000 iterations with the initial learning rate of $1 \times 10^{-3}$ using the Adam optimizer. The learning rate decayed at iterations [250,000, 400,000, 450,000, 475,000]. The training pipeline included data augmentations such as random horizontal flips, vertical flips and rotations. The model is optimized using L1 Loss and Fast Fourier Transform (FFT) Loss [95] with 1.0 and 0.1 weights, respectively. All reported implementations are carried out using Python (version 3.9) programming language and PyTorch Framework, utilizing one RTX4090, 24GB VRAM and 16-core CPU. Training is conducted over approximately 23 days with a single GPU of batch size of 16. + +![](images/c40b65ab994e2395bc7a92b4cda211ba91920ed236cf7a59b3f243698618b855.jpg) + +![](images/01eacf3cccd113c242f920e122e9dd250100f74c8f5e7f603cf5c928a27f10c7.jpg) + +![](images/a2c01cfe6e304254748277a8f823d85316e6c8e8774d19c15de4428bf13a838f.jpg) + +![](images/f872827f0bbe062c88b104ed0c499d216e8b302b4a8aa46faf3793bedbc4cf18.jpg) + +Adaptive Max Pooling + +![](images/50d512d971059ff34fadef4244b80d50193d8309a9a13d88ca8dfe1dfda61946.jpg) +Figure 29. Team MILA: Network architecture of the proposed MVFMNet. 
+ +Local Variance + +![](images/47a8f5767564f21bec0d29e6ae704dea3d6b3837b42286dffc841e399e9070d8.jpg) + +Channel Concatenate + +![](images/12903d3b3084e11b5f896b87149ea6ab71e21ca200bfcd42111d265b7224d8f0.jpg) + +Element-wise Addition + +![](images/417ed3a4f6c7a940301d0c59980746b58cddeb2e843610fa81ff47d3e406ebca.jpg) + +Nearest Up-sampling + +![](images/2d645b7edb13434af06462871050d1113b7f8262449bfe3d9336cd691170f68d.jpg) + +Chanel Split + +![](images/5c4fffafcd87b05c0eb660d6ac051e73323321b85c6274fe2d1d425f63153e14.jpg) + +GELU Activation + +![](images/7b7ae85f98846211c1488657fe8c48f7df95a130ea033e4222c2cc126ad7abe3.jpg) + +Element-wise Product + +# 4.28. BVIVSR + +Method Description. Their solution is built on the advances in state-of-the-art single-image super-resolution (SISR) methods [11, 18, 87, 141, 149], particularly the efficient Transformer-based models [52, 139], the continuous super-resolution approaches, such as HiIF [49, 52], and the knowledge distillation strategies [48, 50, 51]. They employ an efficient Transformer-based network architecture, as illustrated in Fig. 31, where the core component is the Hierarchical Encoding Transformer (HiET) layer. The HiET layer was first proposed in [52] and it is specifically designed to capture rich structural dependencies across various regions of the image, enabling the model to handle complex visual patterns effectively. To enhance the capacity of the model for multi-scale feature representations, each HiET layer is set with different window sizes, allowing it to attend to both local and global contexts. Furthermore, the overall architecture incorporates a modified U-Net structure, where skip connections are introduced between symmetric HiET layers at different depths. This design facilitates efficient multi-level feature fusion and ensures better preservation and reconstruction of fine-grained details + +in the super-resolved outputs. 
In addition, they also apply the multi-teacher knowledge distillation strategy [48] to improve the performance of the lightweight C2D-ISR model, where SRFormer [147], MambaIR [32] and EDSR [70] are employed as teacher networks. + +Training Details. They use the DIV2K [102], 1000 2K images from BVI-AOM [82], Flickr2K [70] and 5000 images from LSDIR[64] as training dataset. For evaluation, they follow common practice and employ the DIV2K validation set (containing 100 images) [102]. The maximum learning rate is set to $4 \times 10^{-4}$ . The learning rate follows a cosine annealing schedule, gradually decreasing after an initial warm-up phase of 50 epochs. They use L1 loss and the Adam [54] optimization during training. Training and testing are implemented based on 4 NVIDIA 4090 GPUs. The model comprises 154.8K parameters with an input size of $64 \times 64 \times 3$ and it was trained for 1000 epochs with 16 batch sizes per GPU. The training of their solution contains five stages: + +- Training the teacher networks, including SRFormer [147], MambaIR [32] and EDSR [70], by using the original settings in their papers; + +![](images/253833a7e355d218a6f8858267ec14826e3afdeb8733a9e206f03c2e38f8543b.jpg) +Figure 30. Team AiMF_SR: Main Figure of Proposed Architecture, Mixture of Efficient Attention. + +![](images/6ddd8335873af8c5a39067d510e2bb84a138cae90a3d4c71cc8cbfc3b65e5ffc.jpg) +Figure 31. Team BVIVSR: The structure of the method. 
+ +- The teacher aggregation of multi-teacher knowledge distillation (MTKD) strategy [48] was adapted to the above teacher networks to obtain an enhanced teacher network; +- Training the lightweight C2D-ISR model [52] on continuous scales i.e, from $\times 2$ to $\times 4$ , to learn the correlation between multiple scales and better recover high-frequency details; +- The learned C2D-ISR model was distilled by the MTKD strategy [48] with their enhanced teacher network to obtain the enhanced student model; +- Finetuning the enhanced student model by increasing the patch size from $64 \times 64$ to $128 \times 128$ . + +# 4.29.CUIT_HTT + +General Method Description. The overall architecture of the proposed method is illustrated in Fig. 32(a), which consists of three main components: the shallow feature extraction module, the deep feature extraction module, and the reconstruction and upsampling module. The shallow feature extraction module employs a BSConv [34] module to extract low-level features such as edges and textures from the input image $I^{in} \in \mathbb{R}^{3 \times H \times W}$ , mapping it to the feature space $f^0 \in \mathbb{R}^{C \times H \times W}$ for further processing. The extracted shallow features are then fed into the deep feature extraction module, which is composed of multiple Frequency-Segmented Attention Blocks (FSABs) designed in this work. The outputs of each FSAB are concatenated + +![](images/f515d21064efcb5f99d823286f26e5e7ffc92eccff0a11ad3be8c813baca6d94.jpg) + +![](images/e71224c1294caaddfff3a7868d78eb34cb3399b9a3e70cfae73f0f5093a5bc12.jpg) +Figure 32. Team CUIT_HT: Schematic Diagram of the Method. (a) Overall Architecture of the Model; (b) Frequency-Segmented Attention Block (FSAB); (c) Schematic of the Enhanced Large-kernel Convolution Block (ELCB); (d) Mechanism of Frequency-Segmented Attention (FSA); (e) Frequency Division and Frequency Recombination. 
+ +![](images/cb4d48375367e6c3831213b678c17fc76e97295cb24d5c23662571f9e2f19896.jpg) + +![](images/0bdef5b2be98fa47128c19bf1077c8c10251c4776405671e6a96c0507aeb5481.jpg) + +![](images/e8b1e59898d3b68eb6fd482318b1fd061e804edafc865f2b99d918a5991b38f5.jpg) + +along the channel dimension and adjusted using a convolutional module group, constituting the deep feature extraction process. As shown in Fig. 32(b), the FSAB structure includes a Concat operation for channel concatenation and a ConvB module group, which consists of a $1 \times 1$ convolution, a GELU activation function, and a BSCov stacked sequentially. Finally, the output of the shallow feature extraction module is added element-wise to the output of the deep feature extraction module via a skip connection and passed to the reconstruction and upsampling module. This module upsamples the feature space information $f^{out} \in \mathbb{R}^{C \times H \times W}$ and maps it to the high-resolution output image $I^{SR} \in \mathbb{R}^{3 \times scale \times H \times scale \times W}$ , where scale is the upscaling factor. In this work, the PixelShuffle method is utilized for upsampling. + +The Frequency-Segmented Attention Block (FSAB) primarily consists of an information distillation architecture for local feature processing and the proposed Frequency-Segmented Attention (FSA) mechanism for global feature processing. The overall architecture of FSA is illustrated in Fig. 32 (d). The input feature map is first transformed into the frequency domain via the Fast Fourier Transform (FFT), enabling global processing in the spatial domain through frequency domain operations. Inspired by windowed attention, the FDivision operation partitions the frequency spec + +trum into multiple windows, which are concatenated along the channel dimension. A grouped convolution is then applied to process features in different frequency ranges using distinct weights. 
Subsequently, the FRecombination operation reassembles the segmented frequency windows back into the spectrum. A convolutional layer is applied, and the result is added element-wise to the original spectrum. Finally, the Inverse Fast Fourier Transform (IFFT) is used to convert the processed features back to the spatial domain, and the output is obtained through elementwise multiplication with the original input. As for the information distillation architecture, they adopt the structure of the Residual Feature Distillation Block (RFDB) from RFDN [71], as shown in Fig. 32. (b). However, they replace the convolutional layers with Enhanced Large-kernel Convolution Blocks (ELCB). This module employs large-kernel depthwise convolution on half of the channels and pointwise convolution on the full channels, achieving a large receptive field without significantly increasing the number of parameters. Additionally, structural reparameterization is utilized during training, where multiple branches with different receptive fields are employed. During inference, these branches are equivalently replaced with a single large-kernel convolution module, thereby enhancing the model's learning capability without increasing inference cost. + +Train details. They utilize the DIV2K [4] and Flickr2k [101] dataset and the first 10K images from the LSDIR [64] dataset as the training set for their model. During training, the dataset undergoes random horizontal flipping and $90^{\circ}$ rotation. The mini-batch size and input patch size are set to 64 and $64 \times 64$ , respectively. The model is optimized using the L1 loss function and the Adam optimizer, with an initial learning rate of $5 \times 10^{-3}$ . The learning rate follows a cosine annealing decay schedule over a total of 1000K iterations. Subsequently, the model is fine-tuned using the L2 loss to achieve improved performance. Training is conducted using PyTorch 1.12.1 on a Tesla P100 16G GPU. + +# 4.30. 
GXZY.AI + +General Method Description. The GXZY AI team proposed a Parameter-free Vision Mamba, as shown in Fig. 33. The work is inspired by MambaIR [33], SPAN [112] and DVMSR [59], PFVM consists of three parts, shallow feature extraction, deep feature extraction and reconstruction module. Shallow feature extraction is achieved by $3 \times 3$ convolution, followed by the use of stacked Residue State Space Blocks (RSSBs), which contain the Vision State Space Module (VSSM) to extract deeper features through the capability of Mamba long-range modeling. Then the shallow and deep features are aggregated by a $3 \times 3$ convolution along with residual concatenation, and finally upsampling is achieved through a sub-pixel convolutional layer to reconstruct the high resolution image. + +As shown in Fig. 34, different from the RSSB used in DVMSR, PFVM does not use stacked ViMM modules, but follows the design paradigm of the RSSB in MambaIR, which differs from MambaIR in that 3-residue branching is used in order to maximize the ability of residual learning. In order to obtain better PSNR with approximate inference time, the convolution layer adopts the bottleneck structure, and the channel attention used in MambaIR is replaced by a parameter-free attention. + +Training Strategy. In the training phase, the GXZY AI team uses the LSDIR [64] dataset for training and the DIV2K [3] validation set for validation. The images in the training set are first cropped with a step size of 240 and a size of 480 to get a series of cropped images. The model was trained on 2 NVIDIA RTX 3090 GPUs. The details of the training steps are as follows: + +1. The HR images are randomly cropped to size 192, and the dataset is augmented using random flipping and rotation. 
The model is trained from scratch with a batch size set to 64, using the Adam optimizer with the learning rate set to 0.0001, $\beta_{1} = 0.9$ , $\beta_{2} = 0.99$ , and a Multi-StepLR scheduler with the learning rate halved for every 200,000 iterations for a total of 1,000,000 iterations. The loss function uses L1 loss. + +2. On the basis of the first step, the model with the optimal PSNR on the DIV2K validation set is loaded as the pre-training model, the size of HR image cropping is adjusted to 256, the learning rate is 0.0002, the learning rate is halved for every 100,000 iterations, and the loss function is still used for 1,000,000 iterations with L1 loss. + +# 4.31. IPCV + +This team uses HiT-SR: Hierarchical Transformer for Efficient Image Super-Resolution [140] for this challenge. The Hierarchical Transformer for Efficient Image Super-Resolution (HiT-SR) is a deep learning model designed to upscale low-resolution (LR) images into high-resolution (HR) outputs while maintaining efficiency and high-quality reconstruction. Unlike traditional convolutional neural networks (CNNs), which struggle to capture long-range dependencies, HiT-SR employs a hierarchical self-attention mechanism that efficiently processes multiscale image features. This allows the model to integrate local and global information, improving image detail reconstruction while reducing computational costs. + +At the core of the network is a hierarchical feature learning process, where image features are extracted and refined progressively through multiple stages. Instead of applying full-resolution self-attention, which is memory intensive, HiT-SR reduces token complexity using patch merging and downsampling modules, allowing efficient computation without loss of essential information. The model further refines these hierarchical features through multiscale self-attention mechanisms, ensuring that fine-grained details and global structures are effectively captured. 
+ +For the final super-resolution reconstruction, HiT-SR aggregates and progressively upsamples the processed features. This multistage refinement approach ensures that high-frequency details are preserved while preventing artifacts common in naive upsampling techniques. The resulting HR image maintains sharp edges, realistic textures, and minimal distortions. They have used available pre-trained model weights [134] on the low resolution images of the test data set and predicted high resolution images. + +# 4.32. X-L + +General Method Description. Their proposed partial permuted self-attention network (PPSA-Net) is shown in Fig. 35. PPSA-Net is inspired by two works: SR-Former [147] and PartialConv [9]. SRFormer is a lightweight super-resolution (SR) approach, but it inevitably still has significant redundancy in feature dimensions. To address this, they combine the strengths of PartialConv to further reduce the complexity and the computational cost. Specifically, they use a feature encoder to process the low-resolution image and feed it to four partial per + +![](images/41a39eaf7e655d5f51d27cf6b8fee4f71730fb177f34e3c0bae53e75a366f369.jpg) +Figure 33. Team GXZY.AI: The structure of PFVM. + +![](images/04628618f2817ad511739ed0c8edd934bec4a877efa1648b1428c4283d5c12db.jpg) +Figure 34. Team GXZY AI: The structural details of MambaIR and DVMSR. + +muted self-attention (PPSA) layers, before finally feeding it into a feature decoder to obtain the final result. In more detail, within each PPSA layer, they use channel split to divide the original features into two sub-features: one comprising $1/4$ of the channels and the other comprising $3/4$ of the channels. The $1/4$ sub-feature is processed by a permuted self-attention block [147], while the $3/4$ sub-feature remains unchanged. After processing, the two sub-features are concatenated back together. 
This design allows us to efficiently reduce computational overhead while maintaining the model's ability to capture both local and global information, leading to high-quality SR results. + +Training details. They follow the same training procedure as SRFormer [147]. However, they conduct their training + +using a single NVIDIA 4090 GPU. + +# 4.33.Quantum_Res + +Method Details. In this work, they propose a novel student-teacher framework for super-resolution, as shown in Fig. 36 that enables a lightweight student model to achieve better performance comparable to heavier models. Specifically, to adopt this architecture, they used MambaIRv2-Light [32] as the student model, while MambaIRv2-base [32] serves as the teacher. While they use MambaIRv2-light as an efficiency, their key contribution is demonstrating that a guided student-teacher learning strategy can significantly improve SR performance while keeping model complexity low. [108] + +![](images/496abd7bd595907443e0670187e23f1f4d56c40d11a3074d787694ea1ce40318.jpg) +Figure 35. Team X-L: Overview of the proposed PPSA-Net. + +![](images/5f12b3c080f9cb9562c8a1755391e2e1c37748366d569130074f4dda3ff55992.jpg) +Figure 36. Team Quantum_Res: The overall pipeline of efficient super-resolution approach, which employs a student-teacher training paradigm. The high-capacity Teacher Network (MambaIRv2-B) learning is transferred to the lightweight Student Network (MambaIRv2-Light) using knowledge distillation. The student network is optimized using L1 loss to ensure accurate superresolution while maintaining efficiency. The input low-resolution (LR) database serves as the training input, guiding the student model to achieve high-fidelity reconstruction with reduced computational complexity. + +The student model extracts the initial low-level features from the input low-resolution image using the $3 \times 3$ convolutional layer. 
The core of the network comprises a series of Attentive State-Space Blocks (ASSBs) [32] to capture long-range dependencies efficiently. For each block, residual connections are used to facilitate stable gradient propagation. Finally, a pixel-shuffle-based upsampling module reconstructs the final high-resolution image. [32] + +The teacher model, MambaIRv2, follows the same architectural design but with increased depth and wider feature dimensions. This model has significantly more parameters and serves as an upper-bound reference for the student. + +Teacher-Guided Inference. The teacher model remains frozen throughout training and is only used as a qualitative reference to validate architectural choices and improvements. The student model inherits refined architectural principles from the teacher rather than weight transfer or feature alignment. This allows the student to retain its original lightweight nature while benefiting from structural knowledge obtained from a larger-capacity model [108]. + +Inference Strategy. During inference, an efficient patch-based processing method is applied to handle high- + +resolution images. Given an input image, it is divided into overlapping patches. Each patch is processed independently by the student network, and final predictions are blended using a weighted averaging scheme to ensure seamless reconstruction. [32] + +Training Details. The student model is initialized using pre-trained weights of MambaIRv2-light. The teacher model is loaded with pre-trained weights from a high-performing MambaIRv2-base variant. Fine-tuning was performed on DIV2K and LSDIR, with the number of feature channels set to 48. The training was conducted on patches of size $192 \times 192$ extracted from high-resolution images, using a batch size of 8. The model is finetuned by minimizing the L1 loss function using the Adam optimizer. 
The initial learning rate is set to $1 \times 10^{-5}$ and is reduced when training iterations reach specific milestones, following a Multi-StepLR decay strategy with a factor of 0.5. The total number of iterations is 150K. The teacher model is only used as a reference for guiding architectural refinement and remains frozen throughout the training. + +# 4.34. SylabSR + +Method. Inspired by RLFN [56] and VARSR [88], they propose an AutoRegressive Residual Local Feature Network (AR-RLFN) to implement a two-stage super-resolution framework. Specifically, they build a lightweight version of RLFN targeting $2 \times$ super-resolution, meaning that the final $4 \times$ SR image is generated from an intermediate $2 \times$ SR image produced by the same model. The overall framework of AR-RLFN is shown in Fig. 37. Although the model needs to be run twice, the $2 \times$ SR task requires significantly fewer parameters and FLOPs compared to the original one, making the approach efficient overall. + +The modified structure of RLFN is further inspired by R2Net [91]. Benefiting from the two-stage strategy, their model is able to operate with fewer parameters. In their framework, they adopt three Residual Local Feature Blocks (RLFBs) with a reduced number of channels compared to the original version. Additionally, they replace ReLU with LeakyReLU to mitigate gradient vanishing. For reparameterization, they employ the Residual-in-Residual Rep Block + +![](images/17821671883df0836f9272ab356f13d8f4e20543a0f3382103eb12be5c27e5b0.jpg) +Figure 37. Team SylabSR: The structure of (up) AR-RLFN, (a) RLFB, (b) RRRB and (c) its reparameterization. + +(RRRB) [26] for improved compression, which reduces the number of parameters during inference by approximately $45\%$ . + +Training Strategy. They train their network on DIV2K [104] and LSDIR [64] datasets, and augment the training data using random flipping and rotation. The training process is divided into three stages: + +1. 
HR patches of size $512 \times 512$ are randomly cropped from the ground truth DIV2K images. In this stage, the model performs $2 \times$ super-resolution. The number of channels in the RRRB is set to 12, and the batch size is set to 32. They use the Adam optimizer to minimize the Charbonnier loss, with the learning rate set to $5\mathrm{e}^{-4}$ . The training runs for 100k iterations, and the learning rate is halved every 20k iterations. +2. HR patches of size $256 \times 256$ are randomly cropped from the ground truth DIV2K images. The model again performs $2 \times$ super-resolution in this stage. The remaining configurations are the same as in Stage 1. +3. HR patches of size $512 \times 512$ are randomly cropped from both the DIV2K and LSDIR datasets. In this stage, they use the Adam optimizer to minimize MSE loss, with the learning rate set to $2\mathrm{e}^{-4}$ . The training runs for 50k iterations, and the learning rate is halved every 10k iterations. + +# 4.35. NJUPCA + +General Method Description. Inspired by SPAN [112], they propose the Spatial Frequency Network (SFNet), which fully leverages both spatial and frequency domain representations. SFNet integrates Frequency Knowledge Miner (FKM) modules after each Spatial Attention Block + +![](images/b1af22d432546be3d58f726de7b0d76a5692472560a730d2902ecb22dbc465ac.jpg) +Figure 38. Team NJUPCA: The detailed architecture of the designed FKM. + +(SPAB) to capture frequency domain features, complementing the spatial features extracted by SPAB. This parallel design enables the network to effectively learn and combine spatial and frequency domain representations, enhancing the performance of super-resolution reconstruction. + +As illustrated in Fig. 38, the frequency knowledge miner (FKM) is designed to learn frequency representation from input, which comprises two core components: multi-band frequency learner (MBFL) and full-frequency adjustment learner (FFAL). 
MBFL aims to enhance the frequency representation by focusing on distinct frequency bands, while FFAL adjusts frequency-domain features from a full-frequency perspective.

Training Details. They employ a two-stage training paradigm:
To address this issue, BSConv optimizes feature processing by utilizing kernel correlations, improving performance and reducing computations [34].
This approach may prevent the model from fully exploiting its capacity. Designing blocks with varying structures tailored to the depth of the network can enhance model performance. In their proposed model, the block structure is adjusted based on network depth to achieve an optimal feature extraction combination at different levels. + +BSCnv reduces parameters using intra-kernel correlation, better preserves information, and improves model accuracy without increasing complexity. Involution, with fewer learning parameters, extracts visual features through its attention mechanism and increases efficiency. Therefore, in the Information distillation structure, they consider the block structure differently. At the beginning of the network, BSCnv is dominant in maintaining pixel correlation and local interactions within the block, and with increasing depth, Involution becomes the dominant operator. If BSCnv is denoted by B and Involution by I, the optimal block combination in the deep feature extraction section is as follows: BBB-BBB-BIB-BIB-IBI-IBI. The details of the blocks are shown in the Fig. 39. + +# 4.37. Cidaut AI + +They propose a lightweight yet effective network with three blocks: an initial Sobel-based block and two ESA-based edge refinement blocks, regulated by a global residual connection. Upscaling is performed via pixel shuffle for efficient super-resolution. + +![](images/0efe7737ced44ffc023622d39d82998bc7cf70f73b427d5f237ad9c301089f96.jpg) +Figure 40. Team Cidaut AI: Fused Edge Attention Network (FEAN) structure. They also show the Sobel Fused Residual Block (SFRB) and the Inverted Residual Bottlenecks (IRB) [86]. + +As shown in Fig. 40, the design integrates two MobileNet Inverted Bottlenecks [86] with channel shuffle and SiLU activation for enhanced information mixing. 
Inspired by EFDN [117], Sobel-based attention extracts edge features, refined using partial convolutions [84] with minimal + +![](images/e80fade4787f96534f1e2ef24e32d09dadef36238ccf578c2e38675c21631355.jpg) +Figure 41. Team Cidaut AI: Structure of the Enhanced ESA Block (EEB). + +parameter increase. The final attention map, a weighted sum of refined $Gx$ , $Gy$ , and $GxGy$ , undergoes further refinement via partial convolution. A final $1 \times 1$ convolution preserves details while preventing excessive edge processing. + +The proposed ERIB block, an efficient convolutional unit with self-activation, starts with depthwise convolution and $1 \times 1$ feature expansion [86]. Partial convolutions [84] refine features, while channel shuffle enhances mixing. Inspired by Simple Gate [10], they introduce nonlinearity by reducing channels without increasing parameters. A weighted residual connection with partial convolution ensures effective information propagation, maintaining competitive performance despite PyTorch inefficiencies. + +For the EEB in Fig. 41, they draw inspiration from the ReNRB block [91], replacing reparameterized convolutions with ERIB for improved efficiency. Partial convolutions in the ESA bottleneck and residual connections further exploit feature map redundancy. + +Training Strategy. The training was carried out using the DIV2K, FLICK2R, and LSIDR (30%) datasets to improve the model's generalization ability. As a baseline, the model was trained for 1000 epochs with a cosine annealing learning rate scheduler, a crop size of $512 \times 512$ , and a batch size of 16. Due to instability in the loss during training, an optimal learning rate analysis was performed whenever the loss diverged. This led to the implementation of a learning rate sweep strategy, which was organized into 5 stages. + +# 4.38.IVL + +Method. 
Their approach builds upon the strategy used in SPAN [108], last year's winning method, to extract attention maps and integrates it into the proposed baseline architecture, EFDN [116], aiming to enhance feature extraction and structural representation in image processing tasks. + +Specifically, as illustrated in Figure 42, this strategy is incorporated within the EDBB blocks of EFDN, which are designed to capture fundamental structural features of an image by applying Sobel and Laplacian filters. These fil + +ters emphasize edge and texture information, contributing to improved representation learning. During the inference phase, the EDBB blocks are reparametrized into 3x3 convolutions to maintain computational efficiency while preserving learned feature representations. + +The attention maps are derived following the approach implemented in SPAN, leveraging an activation function that is both odd and symmetric to effectively highlight essential regions of the image. These attention maps serve as a direct substitute for the ESA block present in the original EFDN model, aiming to refine feature selection and enhance the model's overall performance. + +As a result of the applied modifications, the final architecture has a lower parameter count and requires fewer floating-point operations compared to the proposed baseline method, EFDN. + +Training Details. The training process is structured into three progressive phases to optimize performance and stability: + +- Pre-training: The model undergoes an initial training phase using the DIV2K dataset, incorporating data augmentation techniques such as random rotations, horizontal flipping, and random cropping to generate patches of size $64 \times 64$ . Training is conducted over 30,000 iterations with a batch size of 32, utilizing the Adam optimizer $(\beta_{1} = 0.9, \beta_{2} = 0.999)$ . 
The learning rate is initially set to 1e-3 for the first 20,000 iterations and subsequently reduced to 1e-4 for the remaining 10,000 iterations. L1 loss is used throughout this phase. +- First training stage: The model is further refined using the DIV2K_LSDIR dataset, while maintaining the same augmentation strategies as in the pre-training phase. The patch size is increased to $256 \times 256$ , and training is extended to 100,000 iterations with a batch size of 64. The Adam optimizer $(\beta_{1} = 0.9, \beta_{2} = 0.999)$ is employed, starting with a learning rate of 5e-4, which undergoes a decay by a factor of 0.5 every 20,000 iterations. L1 loss remains the chosen loss function for this stage. +- Second training stage: In the final phase, training continues on the DIV2K_LSDIR dataset with an expanded patch size of $512 \times 512$ for an additional 40,000 iterations. The same augmentation methods are retained, and most hyperparameters remain unchanged. However, to ensure stable convergence and fine-tune performance, the learning rate is reduced to 5e-5. During this stage, L1 loss is applied for the first 10,000 iterations, after which L2 loss is utilized to enhance final model performance. + +All the training phases were performed of the model a single NVIDIA RTX 4070 Super GPU and required approximately 20 hours. + +![](images/a34fd9595397439c984a401aa9617a0634a85b8e638fa7ea12403f01e0a3c2f6.jpg) +Figure 42. Team IVL: Schematic diagram of the method. + +# Acknowledgments + +This work was partially supported by the Humboldt Foundation, the Ministry of Education and Science of Bulgaria (support for INSAIT, part of the Bulgarian National Roadmap for Research Infrastructure). We thank the NTIRE 2025 sponsors: ByteDance, Meituan, Kuaishou, and University of Wurzburg (Computer Vision Lab). + +# A. Teams and Affiliations + +# NTIRE 2025 ESR Teams + +Title: NTIRE 2025 Efficient Super-Resolution Challenge + +Members: + +Bin Ren $^{1,2,4}$ (bin. 
ren@unitn.it), + +Hang Guo $^{3}$ (cshguo@gmail.com), + +Lei Sun4 (lei.sun@insait.ai) + +Zongwei Wu5 (zongwei.wu@uni-wuerzburg.de), + +Radu Timofte $^{5}$ (radu.timofte@vision.ee.ethz.ch) + +Yawei $\mathrm{Li^{6}}$ (li.yawei.ai@gmail.com), + +Affiliations: + +1 University of Pisa, Italy +$^{2}$ University of Trento, Italy +3 Tsinghua University, China +4 INSÄIT, Sofia University,"St. Kliment Ohridski", Bulgaria +5 Computer Vision Lab, University of Würzburg, Germany +$^{6}$ ETH Zürich, Switzerland + +# EMSR + +Title: Distillation-Supervised Convolutional Low-Rank + +Adaptation for Efficient Image Super-Resolution + +Members: + +Yao Zhang $^{1}$ (yao_zhang@sjtu.edu.cn), + +Xinning Chai1 (chaixinning@sjtu.edu.cn), + +Zhengxue Cheng1 (zxcheng@sjtu.edu.cn), + +Yingsheng Qin $^{2}$ (yingsheng.qin@transsion.com), + +Yucai Yang $^{2}$ (yucai.yang@transsion.com), + +Li Song $^{1}$ (song_li@sjtu.edu.cn), + +Affiliations: + +$^{1}$ Shanghai Jiao Tong University +2 Transsion in China + +# XiaomiMM + +Title: SPANF + +Members: + +Hongyuan $\mathrm{Yu}^1$ (yuhyuan1995@gmail.com), + +Pufan $\mathrm{Xu}^2$ (xpf22@mails.tsinghua.edu.cn), + +Cheng Wan3 (jouiney666@gmail.com), + +Zhijuan Huang1 (huangzhijuan@xiaomi.com), + +Peng Guo $^{4}$ (guopeng0100@163.com), + +Shuyuan Cui5 (jouiney666@gmail.com), + +Chenjun Li $^{3}$ (cl2733@cornell.edu), + +Xuehai Hu (hsquare@mail.ustc.edu.cn), + +Pan Pan1 (panpan@xiaomi.com), + +Xin Zhang $^{1}$ (zhangxin14@xiaomi.com), + +Heng Zhang $^{1}$ (zhangheng8@xiaomi.com), + +Affiliations: + +1 Multimedia Department, Xiaomi Inc. +$^{2}$ School of Integrated Circuits, Tsinghua University +3 Cornell University +4 Hanhai Information Technology (Shanghai) Co., Ltd. +5 Huatai Insurance Group Co., Ltd. 
+ +# ShannonLab + +Title: Reparameterization Network for Efficient Image + +Super-Resolution + +Members: + +Qing Luo $^{1}$ (luoqing.94@qq.com), Linyan Jiang $^{1}$ , Haibo Lei $^{1}$ , Qifang Gao $^{1}$ , Yaqing Li $^{1}$ , + +Affiliations: +1Tencent + +# TSSR + +Title: Light Network for Efficient Image Super-Resolution +Members: +Weihua Luo1 (185471613@qq.com), +Tsing Li1, + +Affiliations: +1 Independent researcher + +# mbga + +Title: Expanded SPAN for Efficient Super-Resolution Members: +Qing Wang $^{1}$ (wangqing.Keen@bytedance.com), +Yi Liu $^{1}$ , +Yang Wang $^{1}$ , +Hongyu An $^{1}$ , +Liou Zhang $^{1}$ , +Shijie Zhao $^{1}$ , + +Affiliations: +1 ByteDance + +# VPEG_C + +Title: DAN: Dual Attention Network for lightweight Image Super-Resolution +Members: +Lianhong Song1 (songlianhong@njust.edu.cn), +Long Sun1, +Jinshan Pan1, +Jiangxin Dong1, +Jinhui Tang1 + +Affiliations: +1Nanjing University of Science and Technology + +# XUPTBoys + +Title: Frequency-Guided Multi-level Dispersion Network for Efficient Image Super-Resolution +Members: Jing Wei1 (freedomwj@126.com), + +Mengyang Wang1, Ruilong Guo1, Qian Wang1,2, Affiliations: + +$^{1}$ Xi'an University of Posts and Telecommunications $^{2}$ National Engineering Laboratory for Cyber Event Warning and Control Technologies + +# HannahSR + +Title: Multi-level Refinement and Bias-learnable Attention Dual Branch Network for Efficient Image Super-Resolution Members: Qingliang Liu $^{1}$ (liuqingliang1@honor.com), Yang Cheng $^{2}$ (obliviate73@outlook.com) Affiliations: + $^{1}$ Beijing Honor Device Co., Ltd. + $^{2}$ State Key Laboratory of Integrated Chip & System, Fudan University + +# Davinci + +Title: PlayerAug +Members: +Davinci (1016994139@qq.com), +Enxuan Gu1(guexstan@163.com), +Affiliations: +1 Dalian University of Technology + +# SRCB + +Title: SPAN with pruning. 
+Members: +Dafeng Zhang1 (dfeng.zhang@samsung.com), Yang Yong1, +Affiliations: +1 Samsung Research China - Beijing (SRC-B) + +# Rochester + +Title: ESRNet: An enhanced version of SPAN for Efficient Super-Resolution +Members: +Pinxin Liu1 (pliu23@ur.rochester.edu), +Yongsheng Yu1 (yyu90@ur.rochester.edu), +Hang Hua1 (hhua2@cs.rochester.edu), +Yunlong Tang1 (yunlong.tang@rochester.edu), +Affiliations: +1 University of Rochester + +# IESR + +Title: Inference Efficient Super-Rosolution Net Members: + +Shihao Wang1 (shihao.wsh@antgroup.com), Yukun Yang1, Zhiyu Zhang1, Affiliations: +1 Ant Group + +# ASR + +Title: ASR + +Members: + +Yukun Yang $^{1}$ (yukun.yyk@antgroup.com), + +Affiliations: + +1 None + +# VPEG_O + +Title: SAFMNv3: Simple Feature Modulation Network for Real-Time Image Super-Resolution + +Members: + +Long Sun1 (cs.longsun@njust.edu.cn), + +Lianhong Son1, + +Jinshan Pan1, + +Jiangxin Dong1, + +Jinhui Tang + +Affiliations: + +1 Nanjing University of Science and Technology + +# mmSR + +Title: Efficient Feature Aggregation Network for Image Super-Resolution + +Members: + +Jiyu $\mathsf{W u}^1$ (jiyu_wu@163.com), + +Jiancheng Huang $^{1}$ (jc.huang@siat.ac.cn), + +Yifan Liu1, + +Yi Huang $^{1}$ , + +Shifeng Chen 1, + +Affiliations: + +1 Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences + +# ChanSR + +Title: EECNet: Edge Enhanced Convolutional Network for Efficient Super-Resolution + +Members: + +Rui Chen1 (chenr269@163.com), + +Affiliations: + +1 Shenzhen International Graduate School, Tsinghua University, China + +# Pixel Alchemists + +Title: RCUNet + +Members: + +Yi Feng $^{1}$ (fenyi_work@163.com), + +Mingxi $\mathrm{Li}^1$ + +Cailu Wan1, + +Xiangji $\mathbf{W}\mathbf{u}^{1}$ + +Affiliations: + +$^{1}$ Independent researcher + +# LZ + +Title: Tensor decompose efficient super-resolution network + +Members: + +Zibin Liu1 (1451971605@qq.com), + +Jinyang Zhong $^{2}$ (1439764064@qq.com), + +Affiliations: + +$^{1}$ Southwest Jiaotong 
University + +Sichuan University + +# Z6 + +Title: GLoReNet: Global and Local feature Refinement Network for Efficient Super-Resolution + +Members: + +Kihwan Yoon $^{1}$ (rlghksdbs@gmail.com), + +Ganzorig Gankhuyag1, + +Affiliations: + +$^{1}$ Korea Electronics Technology Institute (KETI) + +# TACO_SR + +Title: TenInOneSR + +Members: + +Shengyun Zhong $^{1}$ (shengyunzhong2002@gmail.com), + +Mingyang $\mathbf{W u}^{2}$ (mingyang@tamu.edu), + +Renjie $\mathrm{Li}^2$ renjie@tamu.edu), + +Yushen Zuo $^{3}$ (zuoyushen12@gmail.com), + +Zhengzhong $\mathrm{Tu}^2$ (tzz@tamu.edu), + +Affiliations: + +1 Northeastern University, USA +$^{2}$ Texas A&M University, USA +3 The Hong Kong Polytechnic University, Hong Kong + +# AIOT.AI + +Title: Efficient channel attention super-resolution network acting on space + +Members: + +Zongang Gao $1^{1}$ (gaozongang@qq.com), + +Guannan Chen1, + +Yuan Tian1, + +Wenhui Chen + +Affiliations: + +$^{1}$ BOE, AIOT CTO, Beijing, China + +# JNU620 + +Title: Reparameterized Residual Local Feature Network for Efficient Image Super-Resolution + +Members: + +Weijun Yuan $^{1}$ (yweijun@stu2022.jnu.edu.cn), + +Zhan Li1, + +Yihang Chen1, + +Yifan Deng1, + +Ruting Deng1, + +Affiliations: + +$^{1}$ Jinan University + +# LVGroup_HFUT + +Title: Swift Parameter-free Attention Network for Efficient Image Super-Resolution + +Members: + +Yilin Zhang $^{1}$ (eslzzyl@163.com), + +Huan Zheng $^{2}$ , (huanzheng1998@gmail.com), + +Yanyan Wei1 (weiyy@hfut.edu.cn), + +Wenxuan Zhao $^{1}$ (nightvoyagerr@gmail.com), + +Suiyi Zhao $^{1}$ (meranderzhao@gmail.com), + +Fei Wang1 (jiafei127@gmail.com), + +Kun Li $^{1}$ (kunli.hfut@gmail.com), + +Affiliations: + +1 Hefei University of Technology + +2 University of Macau + +# YG + +Title: Spatial-Gate Self-Distillation Network for Efficient Image Super-Resolution + +Members: + +Yinggan Tang $^{1}$ (ygtang@ysu.edu.cn), + +Mengjie Su 2, + +Affiliations: + +$^{1}$ School of Electrical Engineering, Yanshan University + +# 
MegastudyEdu_Vision.AI + +Title: Multi-scale Aggregation Attention Network for Efficient Image Super-resolution + +Members: + +Jae-hyeon Lee $^{1}$ (dlwogus147@gmail.com), + +Dong-Hyeop Son1, + +Ui-Jin Choi1, + +Affiliations: + +$^{1}$ MegastudyEdu Vision AI + +# MILA + +Title: Multi-Level Variance Feature Modulation Network for Lightweight Image Super-Resolution + +Members: + +Tiancheng Shao1 (shaotiancheng666@outlook.com), + +Yuqing Zhang2 + +Mengcheng $\mathrm{Ma}^3$ + +Affiliations: + +1 Anhui University of Technology + +# AiMF_SR + +Title: Mixture of Efficient Attention for Efficient Image Super-Resolution + +Members: + +Donggeun $\mathrm{Ko}^1$ (sean.ko@aimfuture.ai), + +Youngsang Kwak1, + +Jiun Lee1, + +Jaehwa Kwak1, + +Affiliations: + +1 AiM Future Inc. + +# BVIVSR + +Title: NTIRE 2025 Efficient SR Challenge Factsheet + +Members: + +Yuxuan Jiang $^{1}$ (yuxuan.jiang@bristol.ac.uk), + +Qiang Zhu $^{2,1}$ (zhuqiang@std.uestc.edu.cn), + +Siyue Teng1 (siyue.teng@bristol.ac.uk), + +Fan Zhang1, (fan.zhang@bristol.ac.uk), + +Shuyuan Zhu2, (eezsy@uestc.edu.cn), + +Bing Zeng $^{2}$ , (eezeng@uestc.edu.cn), + +David Bull $^{1}$ (dave.bull@bristol.ac.uk), + +Affiliations: + +1 University of Bristol +$^{2}$ University of Electronic Science and Technology of China + +# CUIT_HTT + +Title: Frequency-Segmented Attention Network for Lightweight Image Super + +Members: + +Jing Hu1 (jing_hu@163.com), + +Hui Deng1, + +Xuan Zhang $^{1}$ , + +Lin Zhu + +Qinrui Fan + +Affiliations: + +1 Chengdu University of Information Technology + +# GXZY.AI + +Title: Parameter Free Vision Mamba For Lightweight Image Super-Resolution + +Members: + +Weijian Deng $^{1}$ (348957269@qq.com), + +Junnan $\mathbf{W u}^{1}$ (838050895@qq.com), + +Wenqin Deng $^{2}$ (1601524278@qq.com), + +Yuquan Liu $^{1}$ (653060432@qq.com), + +Zhaohong $\mathrm{Xu}^{1}$ (719357155@qq.com), + +Affiliations: + +1 Guangxi China Tobacco Industry Corporation Limited, China +2 Guangxi University, China + +# IPCV + +Title: 
Efficient HiTSR + +Members: + +Jameer Babu Pinjari $^{1}$ (jameer.jb@gmail.com), + +Kuldeep Purohit $^{1}$ , (kuldeeppurohit3@gmail.com) + +Affiliations: + +$^{1}$ Independent researcher + +# X-L + +Title: Partial Permuted Self-Attention for Lightweight Super-Resolution + +Members: + +Zeyu Xiao $^{1}$ (zeyuxiao1997@163.com), + +Zhuoyuan Li $^{2}$ (zhuoyuanli@mail.ustc.edu.cn) + +Affiliations: + +$^{1}$ National University of Singapore +$^{2}$ University of Science and Technology of China + +# Quantum_Res + +Title: Efficient Mamba-Based Image Super-Resolution via Knowledge Distillation + +Members: + +Surya Vashist $^{1}$ (surya.vashisth@s.amity.edu), + +Akshay Dudhane $^{2}$ (akshay.dudhane@mbzuai.ac.ae), + +Praful Hambarde3 (praful@iitmandi.ac.in), + +Sachin Chaudhary $^{4}$ (sachin.chaudhary@ddn.upes.ac.in), + +Satya Naryan Tazi $^{5}$ (satya.tazi@ecajmer.ac.in), + +Prashant Patil $^{6}$ (pwpatil@iitg.ac.in), + +Santosh Kumar Vipparthi7 (skvipparthi@iitrpr.ac.in), + +Subrahmanyam Murala8 (muralas@tcd.ie), + +Affiliations: + +1 Amity University Punjab, India +$^{2}$ Mohamed Bin Zayed University of Artificial Intelligence, Abu Dhabi +3 Indian Institute of Technology Mandi, India +4 UPES Dehradun, India + +$^{5}$ Government Engineering College Ajmer, India +$^{6}$ Indian Institute of Technology Guwahati, India +$^{7}$ Indian Institute of Technology Ropar, India +$^{8}$ Trinity College Dublin, Ireland + +# SylabSR + +Title: AutoRegressive Residual Local Feature Network + +Members: + +Wei-Chen Shen $^{1}$ (r11921a38@ntu.edu.tw), + +I-Hsiang Chen $^{1,2}$ , + +Affiliations: + +$^{1}$ National Taiwan University +2 University of Washington + +# NJUPCA + +Title: Spatial-Frequency Fusion Model for Efficient Super-Resolution + +Members: + +Yunzhe $\mathbf{X}\mathbf{u}^{1}$ (221900144@smail.nju.edu.cn), + +Chen Zhao1, + +Zhizhou Chen1, + +Affiliations: + +$^{1}$ Nanjing University + +# DepthIBN + +Title: Involution and BSConv Multi-Depth Distillation Network for Lightweight 
Image Super-Resolution + +Members: + +Akram Khatami-Rizi $^{1}$ (akramkhatami67@gmail.com), Ahmad Mahmoudi-Aznaveh $^{1}$ , (a.mahmoudi@sbu.ac.ir + +Affiliations: + +1 Cyberspace Research Institute of Shahid Beheshti University of Iran + +# Cidaut.AI + +Title: Fused Edge Attention Network + +Members: + +Alejandro Merino1 (alemer@cidaut.es), + +Bruno Longarela1 (brulon@cidaut.es), + +Javier Abad1 (javaba@cidadut.es), + +Marcos V. Conde $^{2}$ (marcos.conde@uni-wuerzburg.de), + +Affiliations: + +1 Cidaut AI, Spain +$^{2}$ University of Würzburg, Germany + +# IVL + +Title: PAEDN + +Members: + +Simone Bianco $^{1}$ (simone.bianco@unimib.com), + +Luca Cogo1 (luca.cogo@unimib.com), + +Gianmarco Corti1 (g.corti1967@campus.unimib.com), + +# Affiliations: + +$^{1}$ Department of Informatics Systems and Communication, University of Milano-Bicocca, Viale Sarca 336, Building U14, Milan, Italy + +# References + +[1] Lusine Abrahamyan, Anh Minh Truong, Wilfried Philips, and Nikos Deligiannis. Gradient variance loss for structure-enhanced image super-resolution. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 3219-3223. IEEE, 2022. 3 +[2] Eirikur Agustsson and Radu Timofte. Ntire 2017 challenge on single image super-resolution: Dataset and study. In 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1122-1131, 2017. 14 +[3] Eirikur Agustsson and Radu Timofte. Ntire 2017 challenge on single image super-resolution: Dataset and study. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2017. 33 +[4] Eirikur Agustsson and Radu Timofte. NTIRE 2017 challenge on single image super-resolution: Dataset and study. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 126-135, 2017. 2, 18, 19, 22, 23, 26, 33 +[5] Akram Khatami-Rizi Ahmad Mahmoudi-Aznaveh. The role of involution in lightweight super resolution. 
2024 13th Iranian/3rd International Machine Vision and Image Processing Conference (MVIP), 2024. 37 +[6] Akram Khatami-Rizi Ahmad Mahmoudi-Aznaveh. Involution and bsconv multi-depth distillation network for lightweight image super-resolution. arXiv preprint arXiv:2503.14779, 2025. 37 +[7] Sidra Aleem, Julia Dietlmeier, Eric Arazo, and Suzanne Little. Convlora and adabn based domain adaptation via self-training. In 2024 IEEE International Symposium on Biomedical Imaging (ISBI), pages 1-5. IEEE, 2024. 6, 7 +[8] Jiezhang Cao, Qin Wang, Yongqin Xian, Yawei Li, Bingbing Ni, Zhiming Pi, Kai Zhang, Yulun Zhang, Radu Timofte, and Luc Van Gool. Ciaosr: Continuous implicit attention-in-attention network for arbitrary-scale image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1796–1807, 2023. 2 +[9] Jierun Chen, Shiu-hong Kao, Hao He, Weipeng Zhuo, Song Wen, Chul-Ho Lee, and S-H Gary Chan. Run, don't walk: Chasing higher flops for faster neural networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2023. 33 +[10] Liangyu Chen, Xiaojie Chu, Xiangyu Zhang, and Jian Sun. Simple baselines for image restoration, 2022. 38 + +[11] Zheng Chen, Zongwei Wu, Eduard Zamfir, Kai Zhang, Yu-lun Zhang, Radu Timofte, Xiaokang Yang, Hongyuan Yu, Cheng Wan, Yuxin Hong, et al. Ntire 2024 challenge on image super-resolution (x4): Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6108-6132, 2024. 30 +[12] Zheng Chen, Kai Liu, Jue Gong, Jingkai Wang, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on image super-resolution $(\times 4)$ : Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[13] Zheng Chen, Jingkai Wang, Kai Liu, Jue Gong, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on real-world face restoration: Methods and results. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[14] Sung-Jin Cho, Seo-Won Ji, Jun-Pyo Hong, Seung-Won Jung, and Sung-Jea Ko. Rethinking coarse-to-fine approach in single image deblurring. In ICCV, 2021. 10, 17, 29 +[15] Sung-Jin Cho, Seo-Won Ji, Jun-Pyo Hong, Seung-Won Jung, and Sung-Jea Ko. Rethinking coarse-to-fine approach in single image deblurring. In ICCV, pages 4641-4650, 2021. 18, 25, 26 +[16] Marcos Conde, Radu Timofte, et al. NTIRE 2025 challenge on raw image restoration and super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[17] Marcos Conde, Radu Timofte, et al. Raw image reconstruction from RGB on smartphones. NTIRE 2025 challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[18] Marcos V Conde, Zhijun Lei, Wen Li, Christos Bampis, Ioannis Katsavounidis, and Radu Timofte. Aim 2024 challenge on efficient video super-resolution for av1 compressed content. arXiv preprint arXiv:2409.17256, 2024. 30 +[19] Weijian Deng, Hongjie Yuan, Lunhui Deng, and Zengtong Lu. Reparameterized residual feature network for lightweight image super-resolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1712-1721, 2023. 22 +[20] Xiaohan Ding, Yuchen Guo, Guiguang Ding, and Jungong Han. Acnet: Strengthening the kernel skeletons for powerful cnn via asymmetric convolution blocks. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1911-1920, 2019. 3 +[21] Xiaohan Ding, Xiangyu Zhang, Jungong Han, and Guiguang Ding. Diverse branch block: Building a convolution as an inception-like unit. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10886-10895, 2021. 3 +[22] Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. 
Repvgg: Making vgg-style convnets great again
2 +[30] Albert Gu and Tri Dao. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752, 2023. 6 +[31] Enxuan Gu, Hongwei Ge, and Yong Guo. Code: An explicit content decoupling framework for image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2920-2930, 2024. 14 +[32] Hang Guo, Yong Guo, Yaohua Zha, Yulun Zhang, Wenbo Li, Tao Dai, Shu-Tao Xia, and Yawei Li. Mambairv2: Attentive state space restoration. arXiv preprint arXiv:2411.15269, 2024. 6, 30, 34, 35 +[33] Hang Guo, Jinmin Li, Tao Dai, Zhihao Ouyang, Xudong Ren, and Shu-Tao Xia. Mambair: A simple baseline for image restoration with state-space model. In European Conference on Computer Vision, pages 222-241. Springer, 2024. 33 +[34] Daniel Haase and Manuel Amthor. Rethinking depthwise separable convolutions: How intra-kernel correlations lead to improved mobilenets. In Proceedings of the IEEE/CVF + +conference on computer vision and pattern recognition, pages 14600-14609, 2020. 31, 37 +[35] Kai Han, Yunhe Wang, Qi Tian, Jianyuan Guo, Chunjing Xu, and Chang Xu. Ghostnet: More features from cheap operations. In IEEE Conf. Comput. Vis. Pattern Recog., pages 1580-1589, 2020. 19 +[36] Shuhao Han, Haotian Fan, Fangyuan Kong, Wenjie Liao, Chunle Guo, Chongyi Li, Radu Timofte, et al. NTIRE 2025 challenge on text to image generation model quality assessment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[37] Zibin He, Tao Dai, Jian Lu, Yong Jiang, and Shu-Tao Xia. Faked: Feature-affinity based knowledge distillation for efficient image super-resolution. In 2020 IEEE international conference on image processing (ICIP), pages 518-522. IEEE, 2020. 7 +[38] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 
25 +[39] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, et al. Searching for MobileNetV3. In Proceedings of the IEEE International Conference on Computer Vision, pages 1314-1324, 2019. 26 +[40] Andrew G Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, and Hartwig Adam. Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861, 2017. 37 +[41] Mu Hu, Junyi Feng, Jiashen Hua, Baisheng Lai, Jianqiang Huang, Xiaojin Gong, and Xian-Sheng Hua. Online convolutional re-parameterization. CoRR, abs/2204.00826, 2022. 19 +[42] Zhewei Huang, Tianyuan Zhang, Wen Heng, Boxin Shi, and Shuchang Zhou. Real-time intermediate flow estimation for video frame interpolation. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 6, 9 +[43] Zheng Hui, Xiumei Wang, and Xinbo Gao. Fast and accurate single image super-resolution via information distillation network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 723-731, 2018. 36 +[44] Zheng Hui, Xinbo Gao, Yunchu Yang, and Xiumei Wang. Lightweight image super-resolution with information multi-distillation network. In Proceedings of the 27th acm international conference on multimedia, pages 2024-2032, 2019. 11 +[45] Zheng Hui, Xinbo Gao, Yunchu Yang, and Xiumei Wang. Lightweight image super-resolution with information multi-distillation network. In Proceedings of the 27th acm international conference on multimedia, pages 2024-2032, 2019. 10, 36 +[46] Pavel Izmailov, Dmitrii Podoprikhin, Timur Garipov, Dmitry Vetrov, and Andrew Gordon Wilson. Averaging weights leads to wider optima and better generalization. arXiv preprint arXiv:1803.05407, 2018. 23 + +[47] Varun Jain, Zongwei Wu, Quan Zou, Louis Florentin, Henrik Turbell, Sandeep Siddhartha, Radu Timofte, et al. 
NTIRE 2025 challenge on video quality enhancement for video conferencing: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[48] Yuxuan Jiang, Chen Feng, Fan Zhang, and David Bull. Mtkd: Multi-teacher knowledge distillation for image super-resolution. In European Conference on Computer Vision, pages 364–382. Springer, 2024. 30, 31 +[49] Yuxuan Jiang, Ho Man Kwan, Tianhao Peng, Ge Gao, Fan Zhang, Xiaqing Zhu, Joel Sole, and David Bull. HIIF: Hierarchical encoding based implicit image function for continuous super-resolution. arXiv preprint arXiv:2412.03748, 2024. 30 +[50] Yuxuan Jiang, Jakub Nawala, Chen Feng, Fan Zhang, Xiaogjing Zhu, Joel Sole, and David Bull. Rtsr: A real-time super-resolution model for av1 compressed content. arXiv preprint arXiv:2411.13362, 2024. 30 +[51] Yuxuan Jiang, Jakub Nawala, Fan Zhang, and David Bull. Compressing deep image super-resolution models. In 2024 Picture Coding Symposium (PCS), pages 1-5. IEEE, 2024. 14, 30 +[52] Yuxuan Jiang, Chengxi Zeng, Siyue Teng, Fan Zhang, Xiaogjing Zhu, Joel Sole, and David Bull. C2D-ISR: Optimizing attention-based image super-resolution from continuous to discrete scales. arXiv preprint arXiv:2503.13740, 2025. 30, 31 +[53] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 12 +[54] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 8, 14, 18, 28, 30 +[55] F. Kong, Mingxi Li, Songwei Liu, Ding Liu, Jingwen He, Yang Bai, Fangmin Chen, and Lean Fu. Residual local feature network for efficient super-resolution. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 765-775, 2022. 19, 22 +[56] Fangyuan Kong, Mingxi Li, Songwei Liu, Ding Liu, Jingwen He, Yang Bai, Fangmin Chen, and Lean Fu. Residual local feature network for efficient super-resolution. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 766-776, 2022. 18, 35 +[57] Kin Wai Lau, Lai-Man Po, and Yasar Abbas Ur Rehman. Large separable kernel attention: Rethinking the large kernel attention design in cnn. Expert Systems with Applications, 236:121352, 2023. 28 +[58] Sangmin Lee, Eunpil Park, Angel Canelo, Hyunhee Park, Youngjo Kim, Hyungju Chun, Xin Jin, Chongyi Li, Chun-Le Guo, Radu Timofte, et al. NTIRE 2025 challenge on efficient burst hdr and restoration: Datasets, methods, and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[59] Xiaoyan Lei, Wenlong Zhang, and Weifeng Cao. Dvmsr: Distillated vision mamba for efficient super-resolution. In + +Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 6536-6546, 2024. 33 +[60] Duo Li, Jie Hu, Changhu Wang, Xiangtai Li, Qi She, Lei Zhu, Tong Zhang, and Qifeng Chen. Involution: Inverting the inheritance of convolution for visual recognition. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021. 37 +[61] Xin Li, Yeying Jin, Xin Jin, Zongwei Wu, Bingchen Li, Yufei Wang, Wenhan Yang, Yu Li, Zhibo Chen, Bihan Wen, Robby Tan, Radu Timofte, et al. NTIRE 2025 challenge on day and night raindrop removal for dual-focused images: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[62] Xin Li, Xijun Wang, Bingchen Li, Kun Yuan, Yizhen Shao, Suhang Yao, Ming Sun, Chao Zhou, Radu Timofte, and Zhibo Chen. NTIRE 2025 challenge on short-formUGC video quality assessment and enhancement: Kwaisr dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2 +[63] Xin Li, Kun Yuan, Bingchen Li, Fengbin Guan, Yizhen Shao, Zihao Yu, Xijun Wang, Yiting Lu, Wei Luo, Suhang Yao, Ming Sun, Chao Zhou, Zhibo Chen, Radu Timofte, et al. NTIRE 2025 challenge on short-formUGC video quality assessment and enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[64] Yawei Li, Kai Zhang, Jingyun Liang, Jiezhang Cao, Ce Liu, Rui Gong, Yulun Zhang, Hao Tang, Yun Liu, Denis Demandolx, et al. Lsdir: A large scale dataset for image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2023. 2, 6, 10, 12, 14, 16, 17, 18, 19, 23, 24, 26, 28, 30, 33, 36 +[65] Yawei Li, Yulun Zhang, Luc Van Gool, Radu Timofte, et al. NTIRE 2023 challenge on efficient super-resolution: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2023. 15, 16 +[66] Zheyuan Li, Yingqi Liu, Xiangyu Chen, Haoming Cai, Jinjin Gu, Yu Qiao, and Chao Dong. Blueprint separable residual network for efficient image super-resolution. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 832-842, 2022. 13, 26 +[67] Zheyuan Li, Yingqi Liu, Xiangyu Chen, Haoming Cai, Jinjin Gu, Yu Qiao, and Chao Dong. Blueprint separable residual network for efficient image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 833-843, 2022. 10, 36 +[68] Jie Liang, Radu Timofte, Qiaosi Yi, Zhengqiang Zhang, Shuaizheng Liu, Lingchen Sun, Rongyuan Wu, Xindong Zhang, Hui Zeng, Lei Zhang, et al. NTIRE 2025 the 2nd restore any image model (RAIM) in the wild challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 + +[69] Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Kyoung Mu Lee. 
Enhanced deep residual networks for single image super-resolution. In 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1132-1140, 2017. 14 +[70] Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Young Mu Lee. Enhanced deep residual networks for single image super-resolution. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 1132-1140, 2017. 12, 17, 26, 28, 30 +[71] Jie Liu, Jie Tang, and Gangshan Wu. Residual feature distillation network for lightweight image super-resolution. In Proceedings of the European Conference on Computer Vision Workshops, pages 41-55. Springer, 2020. 10, 32, 36 +[72] Jie Liu, Jie Tang, and Gangshan Wu. Residual feature distillation network for lightweight image super-resolution. In Computer Vision-ECCV 2020 Workshops: Glasgow, UK, August 23-28, 2020, Proceedings, Part III 16, pages 41-55. Springer, 2020. 21 +[73] Jie Liu, Wenjie Zhang, Yuting Tang, Jie Tang, and Gangshan Wu. Residual feature aggregation network for image super-resolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2359-2368, 2020. 11 +[74] Xiaohong Liu, Xiongkuo Min, Qiang Hu, Xiaoyun Zhang, Jie Guo, et al. NTIRE 2025 XGC quality assessment challenge: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[75] Xiaoning Liu, Zongwei Wu, Florin-Alexandru Vasluianu, Hailong Yan, Bin Ren, Yulun Zhang, Shuhang Gu, Le Zhang, Ce Zhu, Radu Timofte, et al. NTIRE 2025 challenge on low light image enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[76] Zhuang Liu, Mingjie Sun, Tinghui Zhou, Gao Huang, and Trevor Darrell. Rethinking the value of network pruning. In ICLR, 2019. 
2 +[77] Zhaoyang Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Proceedings of the IEEE/cvf international conference on computer vision. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 12 +[78] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. In *ICLR*, 2017, 17, 29 +[79] Qi Ma, Yue Li, Bin Ren, Nicu Sebe, Ender Konukoglu, Theo Gevers, Luc Van Gool, and Danda Pani Paudel. Shapesplat: A large-scale dataset of gaussian splats and their self-supervised pretraining. In International Conference on 3D Vision 2025, 2024. 2 +[80] Yanyu Mao, Nihao Zhang, Qian Wang, Bendu Bai, Wanying Bai, Haonan Fang, Peng Liu, Mingyue Li, and Shengbo Yan. Multi-level dispersion residual network for efficient image super-resolution. In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1660-1669, 2023. 12 +[81] Yanyu Mao, Nihao Zhang, Qian Wang, Bendu Bai, Wanying Bai, Haonan Fang, Peng Liu, Mingyue Li, and Shengbo + +Yan. Multi-level dispersion residual network for efficient image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 1660-1669, 2023. 10, 11, 28 +[82] Jakub Nawala, Yuxuan Jiang, Fan Zhang, Xiaqing Zhu, Joel Sole, and David Bull. Bvi-aom: A new training dataset for deep video compression optimization. In 2024 IEEE International Conference on Visual Communications and Image Processing (VCIP), pages 1-5. IEEE, 2024. 30 +[83] Ying Nie, Kai Han, Zhenhua Liu, An Xiao, Yiping Deng, Chunjing Xu, and Yunhe Wang. Ghostsr: Learning ghost features for efficient image super-resolution. CoRR, abs/2101.08525, 2021. 19 +[84] Seung Park, Yoon-Jae Yeo, and Yong-Goo Shin. Pconv: simple yet effective convolutional layer for generative adversarial network. Neural Computing and Applications, 34 (9):7113-7124, 2022. 
37, 38 +[85] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. arXiv preprint arXiv:1912.01703, 2019. 18 +[86] Danfeng Qin, Chas Leichner, Manolis Delakis, Marco Fornoni, Shixin Luo, Fan Yang, Weijun Wang, Colby Banbury, Chengxi Ye, Berkin Akin, Vaibhav Aggarwal, Tenghui Zhu, Daniele Moro, and Andrew Howard. Mobilenetv4 - universal models for the mobile ecosystem, 2024. 37, 38 +[87] Yajun Qiu, Qiang Zhu, Shuyuan Zhu, and Bing Zeng. Dual circle contrastive learning-based blind image superresolution. IEEE Transactions on Circuits and Systems for Video Technology, 34(3):1757-1771, 2023. 30 +[88] Yunpeng Qu, Kun Yuan, Jinhua Hao, Kai Zhao, Qizhi Xie, Ming Sun, and Chao Zhou. Visual autoregressive modeling for image super-resolution. arXiv preprint arXiv:2501.18993, 2025. 35 +[89] Bin Ren, Yahui Liu, Yue Song, Wei Bi, Rita Cucchiara, Nicu Sebe, and Wei Wang. Masked jigsaw puzzle: A versatile position embedding for vision transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20382-20391, 2023. 2 +[90] Bin Ren, Yawei Li, Jingyun Liang, Rakesh Ranjan, Mengyuan Liu, Rita Cucchiara, Luc V Gool, Ming-Hsuan Yang, and Nicu Sebe. Sharing key semantics in transformer makes efficient image restoration. Advances in Neural Information Processing Systems, 37:7427-7463, 2024. 2 +[91] Bin Ren, Yawei Li, Nancy Mehta, Radu Timofte, Hongyuan Yu, Cheng Wan, Yuxin Hong, Bingnan Han, Zhuoyuan Wu, Yajun Zou, et al. The ninth nitire 2024 efficient super-resolution challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6595-6631, 2024. 2, 3, 4, 6, 17, 21, 35, 38 +[92] Bin Ren, Hang Guo, Lei Sun, Zongwei Wu, Radu Timofte, Yawei Li, et al. The tenth NTIRE 2025 efficient super-resolution challenge report. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 + +[93] Nickolay Safonov, Alexey Bryntsev, Andrey Moskalenko, Dmitry Kulikov, Dmitriy Vatolin, Radu Timofte, et al. NTIRE 2025 challenge on UGC video enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[94] Wenzhe Shi, Jose Caballero, Ferenc Huszár, Johannes Totz, Andrew P Aitken, Rob Bishop, Daniel Rueckert, and Zehan Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1874-1883, 2016. 25 +[95] Long Sun, Jinshan Pan, and Jinhui Tang. Shufflemixer: An efficient convnet for image super-resolution. Advances in Neural Information Processing Systems, 35:17314-17326, 2022. 29 +[96] Long Sun, Jiangxin Dong, Jinhui Tang, and Jinshan Pan. Spatially-adaptive feature modulation for efficient image super-resolution. In ICCV, 2023. 17 +[97] Lei Sun, Andrea Alfarano, Peiqi Duan, Shaolin Su, Kaiwei Wang, Boxin Shi, Radu Timofte, Danda Pani Paudel, Luc Van Gool, et al. NTIRE 2025 challenge on event-based image deblurring: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[98] Lei Sun, Hang Guo, Bin Ren, Luc Van Gool, Radu Timofte, Yawei Li, et al. The tenth ntire 2025 image denoising challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[99] Yunlong Tang, Junjia Guo, Pinxin Liu, Zhiyuan Wang, Hang Hua, Jia-Xing Zhong, Yunzhong Xiao, Chao Huang, Luchuan Song, Susan Liang, Yizhi Song, Liu He, Jing Bi, Mingqian Feng, Xinyang Li, Zeliang Zhang, and Chen-liang Xu. Generative ai for cel-animation: A survey. arXiv preprint arXiv:2501.06250, 2025. 
14 +[100] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In CVPR Workshops, 2017. 10, 17 +[101] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 114-125, 2017. 23, 33 +[102] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In CVPR workshops, pages 114-125, 2017. 12, 30 +[103] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, Lei Zhang, et al. NTIRE 2017 challenge on single image super-resolution: Methods and results. In CVPR Workshops, 2017. 17, 28 +[104] Radu Timofte, Eirikur Agustsson, Shuhang Gu, J Wu, A Ignatov, and L Van Gool. Div2k dataset: Diverse 2k resolution high quality images as used for the challenges@ ntire (cvpr 2017 and cvpr 2018) and@ pirm (eccv 2018), 2018. 24, 36 + +[105] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Caitian Chen, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 image shadow removal challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[106] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 ambient lighting normalization challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[107] Pavan Kumar Anasosalu Vasu, James Gabriel, Jeff Zhu, Oncel Tuzel, and Anurag Ranjan. An improved one millisecond mobile backbone. arXiv preprint arXiv:2206.04040, 2022. 9 +[108] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. 
Swift parameter-free attention network for efficient superresolution. arXiv preprint arXiv:2311.12770, 2023. 34, 35, 38 +[109] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Ya-jun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient super-resolution. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 6246-6256, 2024. 12, 13 +[110] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient superresolution. In IEEE Conf. Comput. Vis. Pattern Recog. Worksh., 2024. NTIRE 2024 ESR Challenge. 21 +[111] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6246-6256, 2024. 9, 20 +[112] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2024. 7, 8, 14, 20, 21, 23, 24, 26, 33, 36 +[113] Hang Wang, Xuanhong Chen, Bingbing Ni, Yutian Liu, and Jinfan Liu. Omni aggregation networks for lightweight image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22378-22387, 2023. 17 +[114] Hongyuan Wang, Ziyan Wei, Qingting Tang, Shuli Cheng, Liejun Wang, and Yongming Li. Attention guidance distillation network for efficient image super-resolution. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 6287-6296, 2024. 12, 13, 28 +[115] Xintao Wang, Liangbin Xie, Ke Yu, Kelvin C.K. Chan, Chen Change Loy, and Chao Dong. BasicSR: Open source image and video restoration toolbox. 
https://github.com/XPixelGroup/BasicSR, 2022.29 + +[116] Yan Wang. Edge-enhanced feature distillation network for efficient super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 777-785, 2022. 2, 3, 4, 18, 38 +[117] Yan Wang. Edge-enhanced feature distillation network for efficient super-resolution, 2022. 37 +[118] Yucong Wang and Minjie Cai. A single residual network with eta modules and distillation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1970-1980, 2023. 18 +[119] Yan Wang, Yusen Li, Gang Wang, and Xiaoguang Liu. Multi-scale attention network for single image superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2024. 28 +[120] Yan Wang, Yusen Li, Gang Wang, and Xiaoguang Liu. Pla-nusr: Chasing faster convnet for efficient super-resolution. arXiv preprint arXiv:2409.13435, 2024. 26 +[121] Yingqian Wang, Zhengyu Liang, Fengyuan Zhang, Lvli Tian, Longguang Wang, Juncheng Li, Jungang Yang, Radu Timofte, Yulan Guo, et al. NTIRE 2025 challenge on light field image super-resolution: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[122] Gang Wu, Junjun Jiang, Junpeng Jiang, and Xianming Liu. Transforming image super-resolution: A convformer-based efficient approach. IEEE Transactions on Image Processing, 2024. 27, 28 +[123] Chengxing Xie, Xiaoming Zhang, Linze Li, Yuqian Fu, Biao Gong, Tianrui Li, and Kai Zhang. Mat: Multi-range attention transformer for efficient image super-resolution. IEEE Transactions on Circuits and Systems for Video Technology, 2025. 2 +[124] Xingyu Xie, Pan Zhou, Huan Li, Zhouchen Lin, and Shuicheng Yan. Adan: Adaptive nesterov momentum algorithm for faster optimizing deep models. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 
26 +[125] Kangning Yang, Jie Cai, Ling Ouyang, Florin-Alexandru Vasluianu, Radu Timofte, Jiaming Ding, Huiming Sun, Lan Fu, Jinlong Li, Chiu Man Ho, Zibo Meng, et al. NTIRE 2025 challenge on single image reflection removal in the wild: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[126] Lingxiao Yang, Ru-Yuan Zhang, Lida Li, and Xiaohua Xie. Simam: A simple, parameter-free attention module for convolutional neural networks. In International conference on machine learning, pages 11863-11874. PMLR, 2021. 6 +[127] Kihwan Yoon, Ganzorig Gankhuyag, Jinman Park, Haengseon Son, and Kyoungwon Min. Casr: Efficient cascade network structure with channel aligned method for 4k real-time single image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7911-7920, 2024. 21 +[128] Lei Yu, Xinpeng Li, Youwei Li, Ting Jiang, Qi Wu, Haoqiang Fan, and Shuaicheng Liu. Dipnet: Efficiency distillation and iterative pruning for image super-resolution. In + +Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1692-1701, 2023. 15, 16 +[129] Xiyu Yu, Tongliang Liu, Xinchao Wang, and Dacheng Tao. On compressing deep models by low rank and sparse decomposition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7370-7379, 2017. 2 +[130] Pierluigi Zama Ramirez, Fabio Tosi, Luigi Di Stefano, Radu Timofte, Alex Costanzino, Matteo Poggi, Samuele Salti, Stefano Mattoccia, et al. NTIRE 2025 challenge on hr depth from images of specular and transparent surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[131] Eduard Zamfir, Zongwei Wu, Nancy Mehta, Yulun Zhang, and Radu Timofte. See more details: Efficient image superresolution by experts mining. 
In *Forty-first International Conference on Machine Learning*, 2024. 29 +[132] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, and Ming-Hsuan Yang. Restormer: Efficient transformer for high-resolution image restoration. In CVPR, 2022. 10, 28 +[133] Dafeng Zhang, Feiyu Huang, Shizhuo Liu, Xiaobing Wang, and Zhezhu Jin. Swinfir: Revisiting the swinir with fast fourier convolution and improved training for image super-resolution, 2022. 14 +[134] Xiang Zhang. Hit-sr: Hierarchical transformer for efficient image super-resolution. https://github.com/XiangZ-0/HiT-SR, 2024. GitHub repository. 33 +[135] Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, and Jian Sun. Shufflenet: An extremely efficient convolutional neural network for mobile devices. Proceedings of the IEEE conference on computer vision and pattern recognition, 2018. 37 +[136] Xindong Zhang, Hui Zeng, and Lei Zhang. Edge-oriented convolution block for real-time super resolution on mobile devices. In MM '21: ACM Multimedia Conference, Virtual Event, China, October 20 - 24, 2021, pages 4034-4043. ACM, 2021. 19 +[137] Xindong Zhang, Huiyu Zeng, and Lei Zhang. Edge-oriented convolution block for real-time super resolution on mobile devices. Proceedings of the 29th ACM International Conference on Multimedia, 2021. 19 +[138] Xindong Zhang, Hui Zeng, and Lei Zhang. Edge-oriented convolution block for real-time super resolution on mobile devices. In Proceedings of the 29th ACM International Conference on Multimedia, pages 4034-4043, 2021. 3, 21 +[139] Xiang Zhang, Yulun Zhang, and Fisher Yu. Hit-sr: Hierarchical transformer for efficient image super-resolution. In European Conference on Computer Vision, pages 483-500. Springer, 2024. 30 +[140] Xiang Zhang, Yulun Zhang, and Fisher Yu. Hit-sr: Hierarchical transformer for efficient image super-resolution. arXiv preprint, arXiv:2407.05878, 2024. 33 +[141] Yulun Zhang, Kai Zhang, Zheng Chen, Yawei Li, Radu Timofte, et al. 
NTIRE 2023 challenge on image superresolution (x4): Methods and results. In Proceedings of + +the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2023. 30 +[142] Hengyuan Zhao, Xiangtao Kong, Jingwen He, Yu Qiao, and Chao Dong. Efficient image super-resolution using pixel attention. In European Conference on Computer Vision, pages 56-72. Springer, 2020. 26 +[143] Mengyi Zhao, Mengyuan Liu, Bin Ren, Shuling Dai, and Nicu Sebe. Denoising diffusion probabilistic models for action-conditioned 3d motion generation. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 4225-4229. IEEE, 2024. 2 +[144] Mingjun Zheng, Long Sun, Jiangxin Dong, and Jinshan Pan. Smfanet: A lightweight self-modulation feature aggregation network for efficient image super-resolution. In ECCV, 2024. 10, 17, 28 +[145] Mingjun Zheng, Long Sun, Jiangxin Dong, and Jinshan Pan. Smfanet: A lightweight self-modulation feature aggregation network for efficient image super-resolution. In European Conference on Computer Vision, pages 359-375. Springer, 2024. 29 +[146] Xu Zheng, Yunhao Luo, Pengyuan Zhou, and Lin Wang. Distilling efficient vision transformers from cnns for semantic segmentation. Pattern Recognition, 158:111029, 2025. 2 +[147] Yupeng Zhou, Zhen Li, Chun-Le Guo, Song Bai, Ming-Ming Cheng, and Qibin Hou. Srformer: Permuted self-attention for single image super-resolution. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12780–12791, 2023. 30, 33, 34 +[148] Lianghui Zhu, Bencheng Liao, Qian Zhang, Xinlong Wang, Wenyu Liu, and Xinggang Wang. Vision mamba: Efficient visual representation learning with bidirectional state space model. In *Forty-first International Conference on Machine Learning*, 2024. 2 +[149] Qiang Zhu, Pengfei Li, and Qianhui Li. Attention retractable frequency fusion transformer for image super resolution. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1756-1763, 2023. 30 \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10686/images/01eacf3cccd113c242f920e122e9dd250100f74c8f5e7f603cf5c928a27f10c7.jpg b/data/2025/2504_10xxx/2504.10686/images/01eacf3cccd113c242f920e122e9dd250100f74c8f5e7f603cf5c928a27f10c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d50bce1b2e5f5a4f7223bdd66a58b29b6dda7824 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/01eacf3cccd113c242f920e122e9dd250100f74c8f5e7f603cf5c928a27f10c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:132abcf734c0318cb6805a59828efa1c318702a1810490b98d032c6fcf8a990c +size 47802 diff --git a/data/2025/2504_10xxx/2504.10686/images/04628618f2817ad511739ed0c8edd934bec4a877efa1648b1428c4283d5c12db.jpg b/data/2025/2504_10xxx/2504.10686/images/04628618f2817ad511739ed0c8edd934bec4a877efa1648b1428c4283d5c12db.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f4560b4ff41cfe4034a4fc4e2d13a5342999f61 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/04628618f2817ad511739ed0c8edd934bec4a877efa1648b1428c4283d5c12db.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5318f7ab5148025f694e4d777f3a670534a2f4ac570ff2d9409bdfac22ab8218 +size 73058 diff --git a/data/2025/2504_10xxx/2504.10686/images/06848c39c978127dbf1a5777572509c2538e8cda239227794323ca26f32c9d74.jpg b/data/2025/2504_10xxx/2504.10686/images/06848c39c978127dbf1a5777572509c2538e8cda239227794323ca26f32c9d74.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea118788dfd6af51097317b4ff2ce57324d6c089 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/06848c39c978127dbf1a5777572509c2538e8cda239227794323ca26f32c9d74.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40ab944ff34b8f767cd13b533af3b020e24e2fe741e168b8dc7d9600cff034d1 +size 33406 diff 
--git a/data/2025/2504_10xxx/2504.10686/images/083bbb9f17a8948f4ae5b792fe6c480e191cd46a4ccd5b188e6f518badad2507.jpg b/data/2025/2504_10xxx/2504.10686/images/083bbb9f17a8948f4ae5b792fe6c480e191cd46a4ccd5b188e6f518badad2507.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e92125a59532ed60d82c34f24b8ab7ffac5c1261 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/083bbb9f17a8948f4ae5b792fe6c480e191cd46a4ccd5b188e6f518badad2507.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4890fb7f8e84a51a57168046e8cc074fa666a5054ad56d5d9c6389c4f38c968 +size 31756 diff --git a/data/2025/2504_10xxx/2504.10686/images/084bf72c1241e932922747ff09975d59d3e31eba791d72306bc8b49497123eb2.jpg b/data/2025/2504_10xxx/2504.10686/images/084bf72c1241e932922747ff09975d59d3e31eba791d72306bc8b49497123eb2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..85afaed909f6e4f6718c2d918b4b3dcdb8b62346 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/084bf72c1241e932922747ff09975d59d3e31eba791d72306bc8b49497123eb2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b4ea99c6a15bc509ea3409180d3493d5ac3caf15f3d0b53602ae48947fc4c34 +size 2707 diff --git a/data/2025/2504_10xxx/2504.10686/images/0bdef5b2be98fa47128c19bf1077c8c10251c4776405671e6a96c0507aeb5481.jpg b/data/2025/2504_10xxx/2504.10686/images/0bdef5b2be98fa47128c19bf1077c8c10251c4776405671e6a96c0507aeb5481.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d04b87cf1aca896fbbdb628ca8325be26ec44554 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/0bdef5b2be98fa47128c19bf1077c8c10251c4776405671e6a96c0507aeb5481.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f3c7f8152bae7e29cc05794b7cd79e233dc7241cedd90c9273747dfeab4c6ef +size 22645 diff --git a/data/2025/2504_10xxx/2504.10686/images/0bf0af3476ab1274fdca8718aeca55fcd93d0f9478a7cbb685702fba7d668c43.jpg 
b/data/2025/2504_10xxx/2504.10686/images/0bf0af3476ab1274fdca8718aeca55fcd93d0f9478a7cbb685702fba7d668c43.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c01b175e932cdd6d8222abe9e0668f1b629cfec5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/0bf0af3476ab1274fdca8718aeca55fcd93d0f9478a7cbb685702fba7d668c43.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7961960d577b661b731d3354e452893c3af923c4f786b2a618d385d3b5e9391 +size 3816 diff --git a/data/2025/2504_10xxx/2504.10686/images/0efe7737ced44ffc023622d39d82998bc7cf70f73b427d5f237ad9c301089f96.jpg b/data/2025/2504_10xxx/2504.10686/images/0efe7737ced44ffc023622d39d82998bc7cf70f73b427d5f237ad9c301089f96.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d8103086ada00cf812ccefb5aa4311573c5a38b0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/0efe7737ced44ffc023622d39d82998bc7cf70f73b427d5f237ad9c301089f96.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ec8b60a90ecf4b39f37ad35db2e9cbeaccf8a98452ae7db953ea835dd496131 +size 43487 diff --git a/data/2025/2504_10xxx/2504.10686/images/12903d3b3084e11b5f896b87149ea6ab71e21ca200bfcd42111d265b7224d8f0.jpg b/data/2025/2504_10xxx/2504.10686/images/12903d3b3084e11b5f896b87149ea6ab71e21ca200bfcd42111d265b7224d8f0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba9aa974cb9c06f3fba4ccb92c29001a6187bdbb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/12903d3b3084e11b5f896b87149ea6ab71e21ca200bfcd42111d265b7224d8f0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97673427de1330b81c6a6c52a3be867eb9d9c64c72fee7b9af2cc92c3338a5a7 +size 1264 diff --git a/data/2025/2504_10xxx/2504.10686/images/17821671883df0836f9272ab356f13d8f4e20543a0f3382103eb12be5c27e5b0.jpg b/data/2025/2504_10xxx/2504.10686/images/17821671883df0836f9272ab356f13d8f4e20543a0f3382103eb12be5c27e5b0.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..e6f0ad5e4552a28c6b6ecd31040dc763d1b00d4e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/17821671883df0836f9272ab356f13d8f4e20543a0f3382103eb12be5c27e5b0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89624bd0f282e594653fef960bdaba0ba73ab41aa9a361c9b8b9643d1370e9a6 +size 37956 diff --git a/data/2025/2504_10xxx/2504.10686/images/23878695d7788854da19099b7b55349d915b8272fd39b89acbe3ce96ec10141f.jpg b/data/2025/2504_10xxx/2504.10686/images/23878695d7788854da19099b7b55349d915b8272fd39b89acbe3ce96ec10141f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..933a8c413cc4f575b44f99bdc9398703322a4413 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/23878695d7788854da19099b7b55349d915b8272fd39b89acbe3ce96ec10141f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7626b244767fe03ddad4a52b9b5a6fcf3b12eb6b1301291b5a629fbb18177999 +size 10296 diff --git a/data/2025/2504_10xxx/2504.10686/images/24f47da16d2adcd8bc459c4799e0339a09ae21d1339ca0fdeda369a5c77fcfac.jpg b/data/2025/2504_10xxx/2504.10686/images/24f47da16d2adcd8bc459c4799e0339a09ae21d1339ca0fdeda369a5c77fcfac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ac08a33e57e846642bf76defa7d808cf8c98b88 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/24f47da16d2adcd8bc459c4799e0339a09ae21d1339ca0fdeda369a5c77fcfac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3544b871fcd718184877c57a620896c4bda456571908356f437b9f8a9e24c9c +size 4593 diff --git a/data/2025/2504_10xxx/2504.10686/images/253833a7e355d218a6f8858267ec14826e3afdeb8733a9e206f03c2e38f8543b.jpg b/data/2025/2504_10xxx/2504.10686/images/253833a7e355d218a6f8858267ec14826e3afdeb8733a9e206f03c2e38f8543b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..34f92e49fe4537b5a828852631f12398736d3b79 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10686/images/253833a7e355d218a6f8858267ec14826e3afdeb8733a9e206f03c2e38f8543b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a64970b36d2b01c7a8874357f99b6e6999df4fe9dc9a1fb9ddeb603fa8a422e2 +size 92831 diff --git a/data/2025/2504_10xxx/2504.10686/images/2b41b73c636b5b1a0ad078220052dac9269ef257e790e5d38e6d2a351163d0b6.jpg b/data/2025/2504_10xxx/2504.10686/images/2b41b73c636b5b1a0ad078220052dac9269ef257e790e5d38e6d2a351163d0b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9712f56b5ca61a1d73b6f680d4aa53f8403b85aa --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/2b41b73c636b5b1a0ad078220052dac9269ef257e790e5d38e6d2a351163d0b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5ee62b85d8b7cce53eb0eb56c525d5a5be9f501e3f051a6ea845de0ab049dc9 +size 8489 diff --git a/data/2025/2504_10xxx/2504.10686/images/2c3df1452537de9ebf29c6ff074f05d09538ce0acd4db76b079e71e1940d1cb2.jpg b/data/2025/2504_10xxx/2504.10686/images/2c3df1452537de9ebf29c6ff074f05d09538ce0acd4db76b079e71e1940d1cb2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..522e05b8be1572d9339049253357e39daecbc3a3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/2c3df1452537de9ebf29c6ff074f05d09538ce0acd4db76b079e71e1940d1cb2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41fe6c868a1a7c4faef9881dba71d4c0c41a15447e624be1c100fb745f63b060 +size 34026 diff --git a/data/2025/2504_10xxx/2504.10686/images/2d645b7edb13434af06462871050d1113b7f8262449bfe3d9336cd691170f68d.jpg b/data/2025/2504_10xxx/2504.10686/images/2d645b7edb13434af06462871050d1113b7f8262449bfe3d9336cd691170f68d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d6160c734aa8b366edec4e37e4ffc74c2b9b82d3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/2d645b7edb13434af06462871050d1113b7f8262449bfe3d9336cd691170f68d.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:c4ffa8bd4200c323abd4255adc0ebdc6ad17a93b013736c36bd2dad3062abe90 +size 1298 diff --git a/data/2025/2504_10xxx/2504.10686/images/2f6ed61f123f2e629cc3e4c864db9d8028dac5dc9b39c42245ef1a5b9060f050.jpg b/data/2025/2504_10xxx/2504.10686/images/2f6ed61f123f2e629cc3e4c864db9d8028dac5dc9b39c42245ef1a5b9060f050.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d562322baa40c2bc17cfcbf54669e0f9f52da7c7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/2f6ed61f123f2e629cc3e4c864db9d8028dac5dc9b39c42245ef1a5b9060f050.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ca311bd2ab00b7485e0118ab3e153f6333a54b6ad956d01777f31d426054959 +size 8371 diff --git a/data/2025/2504_10xxx/2504.10686/images/308e52fdb5c90b5da45b454cd7914587960c012e3e6058e86a7865d419d13375.jpg b/data/2025/2504_10xxx/2504.10686/images/308e52fdb5c90b5da45b454cd7914587960c012e3e6058e86a7865d419d13375.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e5c5ff455678f5ec12add423f0b53098ff0b20ba --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/308e52fdb5c90b5da45b454cd7914587960c012e3e6058e86a7865d419d13375.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f52c04e9ae13f3a8de220cc6954bf25df82e28f08772646f970293b376cc04ae +size 52618 diff --git a/data/2025/2504_10xxx/2504.10686/images/315b1535dd1f00ca6ee3afb2d0d04420e97751c2f72c52d889169d1533ff716f.jpg b/data/2025/2504_10xxx/2504.10686/images/315b1535dd1f00ca6ee3afb2d0d04420e97751c2f72c52d889169d1533ff716f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..99aa7312838946ea5b49283a99d8f9c0ef048de1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/315b1535dd1f00ca6ee3afb2d0d04420e97751c2f72c52d889169d1533ff716f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63a33a507933ab9ff80b2d1d953bf41f8e6b79af67a6dd8f105419412f16dc31 +size 5455 diff --git 
a/data/2025/2504_10xxx/2504.10686/images/34d5a6b39ed4f2d0efda79eb187a09581f21d2d9c28410b6ab26705d2c94fb35.jpg b/data/2025/2504_10xxx/2504.10686/images/34d5a6b39ed4f2d0efda79eb187a09581f21d2d9c28410b6ab26705d2c94fb35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..da0c50db0a269d662b89f1f453bf6304854c26ce --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/34d5a6b39ed4f2d0efda79eb187a09581f21d2d9c28410b6ab26705d2c94fb35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbf4176fbbc7dc084ced314b6c14c714fe90a6e6d254ec9ad790eefbe772957d +size 14550 diff --git a/data/2025/2504_10xxx/2504.10686/images/35623c5612d71338e09941264b96e83f4fddcf408b0156ea332533ff5d0bbd24.jpg b/data/2025/2504_10xxx/2504.10686/images/35623c5612d71338e09941264b96e83f4fddcf408b0156ea332533ff5d0bbd24.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9668727b400d15f59e9307ea6b861c2d4c6e66bc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/35623c5612d71338e09941264b96e83f4fddcf408b0156ea332533ff5d0bbd24.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63a85a933b9cd8618894cfe28e1bced911cba4d95a66fce1961d47a986451ed3 +size 7931 diff --git a/data/2025/2504_10xxx/2504.10686/images/36311b6faf268e669dcd643228db99b83d29abb2a47e421a048368d4aa625818.jpg b/data/2025/2504_10xxx/2504.10686/images/36311b6faf268e669dcd643228db99b83d29abb2a47e421a048368d4aa625818.jpg new file mode 100644 index 0000000000000000000000000000000000000000..201524a7077c7bd0d182ca2a9614fc2d373f91e2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/36311b6faf268e669dcd643228db99b83d29abb2a47e421a048368d4aa625818.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:524fbb2bb7f0401d6ef870e7e80719c8e5720771d4cba4c44666316a50db7969 +size 79559 diff --git a/data/2025/2504_10xxx/2504.10686/images/39e5ebdf23d3857b266c36552ee68e47923ad4ffcfb55aa17c77ee301d9f96b3.jpg 
b/data/2025/2504_10xxx/2504.10686/images/39e5ebdf23d3857b266c36552ee68e47923ad4ffcfb55aa17c77ee301d9f96b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82fbdde72bd5320292f860cc0a7fcd1af2f879a4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/39e5ebdf23d3857b266c36552ee68e47923ad4ffcfb55aa17c77ee301d9f96b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:394fe8707651507093542de0b0708c482c6c63332dfe5a0c59420363c144cd1a +size 30410 diff --git a/data/2025/2504_10xxx/2504.10686/images/3f67cc192541e998eec00480a1a2492eb605099f89e6c32a785e3c3a27965043.jpg b/data/2025/2504_10xxx/2504.10686/images/3f67cc192541e998eec00480a1a2492eb605099f89e6c32a785e3c3a27965043.jpg new file mode 100644 index 0000000000000000000000000000000000000000..03e97031bf375ed58c862f0064b31ba8f0cebbbe --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/3f67cc192541e998eec00480a1a2492eb605099f89e6c32a785e3c3a27965043.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ecbd29ecd803f0620013e4b4f2edc944f40966cf34144a16fdaac2942f84e60f +size 3849 diff --git a/data/2025/2504_10xxx/2504.10686/images/417ed3a4f6c7a940301d0c59980746b58cddeb2e843610fa81ff47d3e406ebca.jpg b/data/2025/2504_10xxx/2504.10686/images/417ed3a4f6c7a940301d0c59980746b58cddeb2e843610fa81ff47d3e406ebca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..089aef062f2cb3488575e62c4c54a9447f755797 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/417ed3a4f6c7a940301d0c59980746b58cddeb2e843610fa81ff47d3e406ebca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74eda67268e4e485135ffc73ea5677a922a76f9a9aed7a76d58f9d71d6f816a7 +size 1326 diff --git a/data/2025/2504_10xxx/2504.10686/images/41a39eaf7e655d5f51d27cf6b8fee4f71730fb177f34e3c0bae53e75a366f369.jpg b/data/2025/2504_10xxx/2504.10686/images/41a39eaf7e655d5f51d27cf6b8fee4f71730fb177f34e3c0bae53e75a366f369.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..4e9a3003d9cb64a58a3254d972d074257f127967 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/41a39eaf7e655d5f51d27cf6b8fee4f71730fb177f34e3c0bae53e75a366f369.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c798d7d62a85501e1a391d84a85514d0b8699327009f0ffda13ea91bfda5bdef +size 73100 diff --git a/data/2025/2504_10xxx/2504.10686/images/449a23221e1f675fa538f3ab016b13b78bd4d647f4c1a5ea675c158ab5a86d85.jpg b/data/2025/2504_10xxx/2504.10686/images/449a23221e1f675fa538f3ab016b13b78bd4d647f4c1a5ea675c158ab5a86d85.jpg new file mode 100644 index 0000000000000000000000000000000000000000..92f7f946429d2815d6656cd7bc0af77668e9e081 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/449a23221e1f675fa538f3ab016b13b78bd4d647f4c1a5ea675c158ab5a86d85.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:008b4cfc920986406019632b231b4562ac3a33fa05a26d9417b889631f62ba14 +size 15560 diff --git a/data/2025/2504_10xxx/2504.10686/images/44d502a6c0ed805d464c3c6f19148e6c1e459ecc1aa0d51d667b715fea387a24.jpg b/data/2025/2504_10xxx/2504.10686/images/44d502a6c0ed805d464c3c6f19148e6c1e459ecc1aa0d51d667b715fea387a24.jpg new file mode 100644 index 0000000000000000000000000000000000000000..44fcd8e3f3209e9981a34b16e524ec5c65f92e33 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/44d502a6c0ed805d464c3c6f19148e6c1e459ecc1aa0d51d667b715fea387a24.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:edd31d16fde26948802af383475ac847605829d8c327ec29492b0cef72999277 +size 16855 diff --git a/data/2025/2504_10xxx/2504.10686/images/47a8f5767564f21bec0d29e6ae704dea3d6b3837b42286dffc841e399e9070d8.jpg b/data/2025/2504_10xxx/2504.10686/images/47a8f5767564f21bec0d29e6ae704dea3d6b3837b42286dffc841e399e9070d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a82efb191209ab004fb01c7ab323690671439f0 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10686/images/47a8f5767564f21bec0d29e6ae704dea3d6b3837b42286dffc841e399e9070d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c394c259bb946c45bc8372971fe3cd4a530f01ac3b9bbcbaaa80e09445f0155 +size 1369 diff --git a/data/2025/2504_10xxx/2504.10686/images/488b1bb587aaf16cfb309ec09954a5b493357a03ceefef34c9d2e6752fd2a5c6.jpg b/data/2025/2504_10xxx/2504.10686/images/488b1bb587aaf16cfb309ec09954a5b493357a03ceefef34c9d2e6752fd2a5c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b99fefdf64f2f64ddf14d02ebabe1d47c1e1dd33 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/488b1bb587aaf16cfb309ec09954a5b493357a03ceefef34c9d2e6752fd2a5c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a457d85c20ede1d3f02d3bc47456d059dc1947dd10797380cc35add56c4f9e1b +size 4210 diff --git a/data/2025/2504_10xxx/2504.10686/images/496abd7bd595907443e0670187e23f1f4d56c40d11a3074d787694ea1ce40318.jpg b/data/2025/2504_10xxx/2504.10686/images/496abd7bd595907443e0670187e23f1f4d56c40d11a3074d787694ea1ce40318.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a468f336de35d5a0ff864d0ce5e81b30f71e05cc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/496abd7bd595907443e0670187e23f1f4d56c40d11a3074d787694ea1ce40318.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f56bcd2baca771d4147cbbacada1489618d300c4850942458545df9fc02c468b +size 45471 diff --git a/data/2025/2504_10xxx/2504.10686/images/49df396ea37e71a78da20d8f92ed483037fdc7b9986cdfebb18dda7676b67431.jpg b/data/2025/2504_10xxx/2504.10686/images/49df396ea37e71a78da20d8f92ed483037fdc7b9986cdfebb18dda7676b67431.jpg new file mode 100644 index 0000000000000000000000000000000000000000..42c59e79dfad523d484aa4c5305b98663e1587bb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/49df396ea37e71a78da20d8f92ed483037fdc7b9986cdfebb18dda7676b67431.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:569842cdc89f0ae89b7c85b450d06ec9afc955ca1fbae2bb4872b3e8ddab4695 +size 11912 diff --git a/data/2025/2504_10xxx/2504.10686/images/4b7b31467018d189fc76958a39af5df580c215fa4fa8a574934ee8b7d2b699dd.jpg b/data/2025/2504_10xxx/2504.10686/images/4b7b31467018d189fc76958a39af5df580c215fa4fa8a574934ee8b7d2b699dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f29f12a94556e9bca9c78cc7e3bd9a15a004b1d0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/4b7b31467018d189fc76958a39af5df580c215fa4fa8a574934ee8b7d2b699dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85f84bc40405d4602a1ae8e0b2d4970471f1c8653e0789286c4864519204e6b7 +size 99144 diff --git a/data/2025/2504_10xxx/2504.10686/images/4c3c5b5fbca74d5852049084b3b57cd3a1d788d88fbe4b9e13984f18476f973b.jpg b/data/2025/2504_10xxx/2504.10686/images/4c3c5b5fbca74d5852049084b3b57cd3a1d788d88fbe4b9e13984f18476f973b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6190d68a51abd507b01e5211115c6fd8f030ea14 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/4c3c5b5fbca74d5852049084b3b57cd3a1d788d88fbe4b9e13984f18476f973b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea040814e524a62f0071e925f3a3a60e24cf7d00dfeb45183865bb12b4778f64 +size 3394 diff --git a/data/2025/2504_10xxx/2504.10686/images/4ec5aed984692f2599a018cf1a8613857c620577add3863933589b6b58f6373d.jpg b/data/2025/2504_10xxx/2504.10686/images/4ec5aed984692f2599a018cf1a8613857c620577add3863933589b6b58f6373d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ef3c3cb9975426b5132820f8d15f1553a60da78 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/4ec5aed984692f2599a018cf1a8613857c620577add3863933589b6b58f6373d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47303e20d2b81a0b90b78b144110ed111a83eb2e257f363efd22bf0a0e54cade +size 4440 diff --git 
a/data/2025/2504_10xxx/2504.10686/images/50d512d971059ff34fadef4244b80d50193d8309a9a13d88ca8dfe1dfda61946.jpg b/data/2025/2504_10xxx/2504.10686/images/50d512d971059ff34fadef4244b80d50193d8309a9a13d88ca8dfe1dfda61946.jpg new file mode 100644 index 0000000000000000000000000000000000000000..35822ab54d87121be9aa279e9ee1d08e958e5c33 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/50d512d971059ff34fadef4244b80d50193d8309a9a13d88ca8dfe1dfda61946.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe11bfc1d065a17af04fca938d35d45ebd709835ca23140e6711852dad0f18d6 +size 1351 diff --git a/data/2025/2504_10xxx/2504.10686/images/52d45502cb2dd3aee6acddbf1a133f0ba433b74e9d56e8aac335b471219a8fa6.jpg b/data/2025/2504_10xxx/2504.10686/images/52d45502cb2dd3aee6acddbf1a133f0ba433b74e9d56e8aac335b471219a8fa6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8822fc9bc5c5a5ac51ee97ba77848285a978799d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/52d45502cb2dd3aee6acddbf1a133f0ba433b74e9d56e8aac335b471219a8fa6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d95a5f8624129e4572c80f671104415e1accdeda993c6b3630c9a402392c0f6b +size 3258 diff --git a/data/2025/2504_10xxx/2504.10686/images/5aeaf5ee1c5610a62b97273c69623209ee7d7db802eb52f815951d41d89ec85f.jpg b/data/2025/2504_10xxx/2504.10686/images/5aeaf5ee1c5610a62b97273c69623209ee7d7db802eb52f815951d41d89ec85f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7f820999e306b79342814825ae1d5b23cd144338 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/5aeaf5ee1c5610a62b97273c69623209ee7d7db802eb52f815951d41d89ec85f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5ff2f455cb74770213b2f0a7dd0a41ac80ae1238e127b9cb42116e3e411bbcf +size 16139 diff --git a/data/2025/2504_10xxx/2504.10686/images/5c4fffafcd87b05c0eb660d6ac051e73323321b85c6274fe2d1d425f63153e14.jpg 
b/data/2025/2504_10xxx/2504.10686/images/5c4fffafcd87b05c0eb660d6ac051e73323321b85c6274fe2d1d425f63153e14.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a92282826aa32da38ad3455e0558b0dc07b2720 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/5c4fffafcd87b05c0eb660d6ac051e73323321b85c6274fe2d1d425f63153e14.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0b2de29b7c59ee4378e9eaea88c44efe02700307ce995a3a1f744be7eaed8bf +size 1204 diff --git a/data/2025/2504_10xxx/2504.10686/images/5ccecde1d3ef823534fdf75e86042b78c4e74ce2fc7138ff98d2bf2829190011.jpg b/data/2025/2504_10xxx/2504.10686/images/5ccecde1d3ef823534fdf75e86042b78c4e74ce2fc7138ff98d2bf2829190011.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f082559f52ea142e1b7130517820689bb5d90895 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/5ccecde1d3ef823534fdf75e86042b78c4e74ce2fc7138ff98d2bf2829190011.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:997ef0bf38b4271db6c87d0b79037f56ef6322f01049925ad0d4f66a7318d421 +size 4340 diff --git a/data/2025/2504_10xxx/2504.10686/images/5f12b3c080f9cb9562c8a1755391e2e1c37748366d569130074f4dda3ff55992.jpg b/data/2025/2504_10xxx/2504.10686/images/5f12b3c080f9cb9562c8a1755391e2e1c37748366d569130074f4dda3ff55992.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3c59ae08b95cfb116d5c5174bf9125b51a15e22 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/5f12b3c080f9cb9562c8a1755391e2e1c37748366d569130074f4dda3ff55992.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0712860bf27636eb95cb10a7fac26e90b7a87acaa7d6dafd496356b7b2cc9c5 +size 27928 diff --git a/data/2025/2504_10xxx/2504.10686/images/60242a8eb145e6d82d6a9916b55caa48e484b5c2f5637f411347ab1d8bbf239f.jpg b/data/2025/2504_10xxx/2504.10686/images/60242a8eb145e6d82d6a9916b55caa48e484b5c2f5637f411347ab1d8bbf239f.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..2def17529d4fa5edffd231c36b793c67996460c0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/60242a8eb145e6d82d6a9916b55caa48e484b5c2f5637f411347ab1d8bbf239f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c495bfdd3d92e8d90a8fe48f28314dc6051b49b013c2b9bccbb5ab06865cbf98 +size 20871 diff --git a/data/2025/2504_10xxx/2504.10686/images/629c7ce13a1f3e7c8602c026f3e125d7a476ca88f8ecc455afd74783f7e65b16.jpg b/data/2025/2504_10xxx/2504.10686/images/629c7ce13a1f3e7c8602c026f3e125d7a476ca88f8ecc455afd74783f7e65b16.jpg new file mode 100644 index 0000000000000000000000000000000000000000..089233fded1221bc6ccb580ad73563d20cc5c439 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/629c7ce13a1f3e7c8602c026f3e125d7a476ca88f8ecc455afd74783f7e65b16.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0431bbbe1d4578871eb10f91260b839c9c2ef75c2c5a9155a43105796d1425ed +size 3511 diff --git a/data/2025/2504_10xxx/2504.10686/images/6401cbf20b4191d272941b585c8d4f0adc6a86032cd4277e954bb6298446d34b.jpg b/data/2025/2504_10xxx/2504.10686/images/6401cbf20b4191d272941b585c8d4f0adc6a86032cd4277e954bb6298446d34b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0dd9ec58aa03f83e2926337f2f739a00702960d3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/6401cbf20b4191d272941b585c8d4f0adc6a86032cd4277e954bb6298446d34b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90b91fef758eabd698fd2ef05b4b17d9ae2f18179b1f5f86220eb50301bd74c5 +size 4377 diff --git a/data/2025/2504_10xxx/2504.10686/images/65bfab3183f17ccd152cdcd70375e893a18feaf358a53ada5a84bdc2975a7327.jpg b/data/2025/2504_10xxx/2504.10686/images/65bfab3183f17ccd152cdcd70375e893a18feaf358a53ada5a84bdc2975a7327.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3d4bf33bc6bdd4fd5f8d26b775b3132bf573ffc2 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10686/images/65bfab3183f17ccd152cdcd70375e893a18feaf358a53ada5a84bdc2975a7327.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed50c56b48ef1d7d2e4993fe9848280e6c74ce5ddfd223ac0699f2f87d9ea7e5 +size 22971 diff --git a/data/2025/2504_10xxx/2504.10686/images/6ddd8335873af8c5a39067d510e2bb84a138cae90a3d4c71cc8cbfc3b65e5ffc.jpg b/data/2025/2504_10xxx/2504.10686/images/6ddd8335873af8c5a39067d510e2bb84a138cae90a3d4c71cc8cbfc3b65e5ffc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bcbe2b84ba407aea77f6549dd61810de8aa18e57 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/6ddd8335873af8c5a39067d510e2bb84a138cae90a3d4c71cc8cbfc3b65e5ffc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a4e230f09c84a72eb8b67a6cab59510953883fb8ce655e3e2e60f10ba1bbb9d +size 46679 diff --git a/data/2025/2504_10xxx/2504.10686/images/6ea31666372ce72972d1e01c9f83342103a7a680943cd9cd751793bd9f5c5350.jpg b/data/2025/2504_10xxx/2504.10686/images/6ea31666372ce72972d1e01c9f83342103a7a680943cd9cd751793bd9f5c5350.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b9375963442ba64b82618baf74bb11950e4911b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/6ea31666372ce72972d1e01c9f83342103a7a680943cd9cd751793bd9f5c5350.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc785d8e546d8c19df085da3b84a9660552b35268d9cf02fe58ebbd239e4c08a +size 14846 diff --git a/data/2025/2504_10xxx/2504.10686/images/77c96b623860559eacc549d8e973b55a52ac782e82292d36ce71b6afab9761ca.jpg b/data/2025/2504_10xxx/2504.10686/images/77c96b623860559eacc549d8e973b55a52ac782e82292d36ce71b6afab9761ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a484b1d3ae1265cc1b0a9c0e3048c78975d89624 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/77c96b623860559eacc549d8e973b55a52ac782e82292d36ce71b6afab9761ca.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:820313bfad94dbc24c9cb66c8ab5718591023052054d86c1e7852d96a04fc626 +size 83680 diff --git a/data/2025/2504_10xxx/2504.10686/images/7b0c3a3313a71ca32c35419030fc37f77e748481cabac6389750f2cfefd13a40.jpg b/data/2025/2504_10xxx/2504.10686/images/7b0c3a3313a71ca32c35419030fc37f77e748481cabac6389750f2cfefd13a40.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f2e21fa26f02a08118bb63e5e03d91a0c2317e56 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/7b0c3a3313a71ca32c35419030fc37f77e748481cabac6389750f2cfefd13a40.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bed6ad41d2332e2fcec150ed23c81af3a838d8b6e7d0e2d5fc13fdba60d39e80 +size 9089 diff --git a/data/2025/2504_10xxx/2504.10686/images/7b7ae85f98846211c1488657fe8c48f7df95a130ea033e4222c2cc126ad7abe3.jpg b/data/2025/2504_10xxx/2504.10686/images/7b7ae85f98846211c1488657fe8c48f7df95a130ea033e4222c2cc126ad7abe3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7c7cb794f7d7892c0c76d6f9dfa14d47ef1a14ca --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/7b7ae85f98846211c1488657fe8c48f7df95a130ea033e4222c2cc126ad7abe3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63abc7b52a97b5b74e0c745b9a58d99959a2635c1d8ab1eca3c18f968ca671ae +size 1164 diff --git a/data/2025/2504_10xxx/2504.10686/images/7d49cf2eb698bccefc0d40589b7e4973c91e663cd6c23a2111a0cbded9d52ffc.jpg b/data/2025/2504_10xxx/2504.10686/images/7d49cf2eb698bccefc0d40589b7e4973c91e663cd6c23a2111a0cbded9d52ffc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e45824f67a9f5837bc6093778ae2f8d5bf6c140 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/7d49cf2eb698bccefc0d40589b7e4973c91e663cd6c23a2111a0cbded9d52ffc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c9fd84c7e687a683e5bb9b5bae039b3ed8e2d05bf61b02bdeef137a32e1529f +size 245696 diff --git 
a/data/2025/2504_10xxx/2504.10686/images/8625a75f76c2da91bb4d2e4dae9cd14a3e706d54d637ec9410ab4e46f76d0fe9.jpg b/data/2025/2504_10xxx/2504.10686/images/8625a75f76c2da91bb4d2e4dae9cd14a3e706d54d637ec9410ab4e46f76d0fe9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..17ab17fd2d29c01c37f50b4156fe4e8d871bfabe --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/8625a75f76c2da91bb4d2e4dae9cd14a3e706d54d637ec9410ab4e46f76d0fe9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:532708642fbbde62c33c6d5ff8e354dee4746e9a6a572639ca2f2f8f807ea884 +size 16386 diff --git a/data/2025/2504_10xxx/2504.10686/images/8ac9e00d1996213e6f79f7b908791efe8ef055eead3451ed8eaed1fab8097e08.jpg b/data/2025/2504_10xxx/2504.10686/images/8ac9e00d1996213e6f79f7b908791efe8ef055eead3451ed8eaed1fab8097e08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f221f14690ea3fe6d7e4305b220fda067d68ef57 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/8ac9e00d1996213e6f79f7b908791efe8ef055eead3451ed8eaed1fab8097e08.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bed2d90e7de8f8b8d408c3504e5d430beb92ac50b05a4f591bf81c07c9ebf83 +size 75245 diff --git a/data/2025/2504_10xxx/2504.10686/images/8bd2ef8051dea56cf5a345b62b7708f0cb0526c294db8e195419e3b299cee319.jpg b/data/2025/2504_10xxx/2504.10686/images/8bd2ef8051dea56cf5a345b62b7708f0cb0526c294db8e195419e3b299cee319.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bcd22b60871700955c789b3250dd5d251569b98f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/8bd2ef8051dea56cf5a345b62b7708f0cb0526c294db8e195419e3b299cee319.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8510d82dd9da5bd4a508aa7e0c6567767d054a8c08870f7421a340353d5c7de1 +size 23516 diff --git a/data/2025/2504_10xxx/2504.10686/images/8cbbb99fffc40ec7d3df107176ab0e1d25d99c8a663aeb0e008ad9ce578e6cc3.jpg 
b/data/2025/2504_10xxx/2504.10686/images/8cbbb99fffc40ec7d3df107176ab0e1d25d99c8a663aeb0e008ad9ce578e6cc3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73bd84f305f1e4969544c6dfeba248659cf5387f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/8cbbb99fffc40ec7d3df107176ab0e1d25d99c8a663aeb0e008ad9ce578e6cc3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e00f0b45d2615a80c8cdd2e55410e4c44201981140ea53b7e9ff60cfff1c080 +size 5005 diff --git a/data/2025/2504_10xxx/2504.10686/images/8cf587e3cbc1927fbca4656b8736d9f84ac6f220c9227c1ad73401744af36b10.jpg b/data/2025/2504_10xxx/2504.10686/images/8cf587e3cbc1927fbca4656b8736d9f84ac6f220c9227c1ad73401744af36b10.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e6c3d3a89d7c905111f31b833929622ff419db6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/8cf587e3cbc1927fbca4656b8736d9f84ac6f220c9227c1ad73401744af36b10.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19c475a7421a7eb18ab478daa4ee284cf602e35d4854242cfd400612f737ae41 +size 48697 diff --git a/data/2025/2504_10xxx/2504.10686/images/9b50cbffaa73a484b07fe9e673836900b5b744821aa2f3a6b5870cf1b1837401.jpg b/data/2025/2504_10xxx/2504.10686/images/9b50cbffaa73a484b07fe9e673836900b5b744821aa2f3a6b5870cf1b1837401.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b5a6e4b738e4a0bdfafc5441daafde6b8e415dee --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/9b50cbffaa73a484b07fe9e673836900b5b744821aa2f3a6b5870cf1b1837401.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e25564abacddb13b7b119e909da943f599e66325b5c4bb9bf6848ac3aa459c3f +size 299588 diff --git a/data/2025/2504_10xxx/2504.10686/images/9bbcbdf88644d05c0209ff8adeee4dc89fcb240fb6ca41121a1750176f9fa5bd.jpg b/data/2025/2504_10xxx/2504.10686/images/9bbcbdf88644d05c0209ff8adeee4dc89fcb240fb6ca41121a1750176f9fa5bd.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..e1145fff5fa41bce646f6e46e3de88057973c51a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/9bbcbdf88644d05c0209ff8adeee4dc89fcb240fb6ca41121a1750176f9fa5bd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01d1833c9772c9298afed4912283d23b2e2a460ca2c6e1defc06ad513fc66eb9 +size 41672 diff --git a/data/2025/2504_10xxx/2504.10686/images/9d57b96f81dfb69c25ced351b0ea2d15b1355523d983f5145ab00fb53b103f3a.jpg b/data/2025/2504_10xxx/2504.10686/images/9d57b96f81dfb69c25ced351b0ea2d15b1355523d983f5145ab00fb53b103f3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9fa58155c3944269249b11d66f41906f50600708 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/9d57b96f81dfb69c25ced351b0ea2d15b1355523d983f5145ab00fb53b103f3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84138051c2a0a29d7df2465d374c7fc40abae089d88a522e358bbc27d304c55a +size 4138 diff --git a/data/2025/2504_10xxx/2504.10686/images/a2c01cfe6e304254748277a8f823d85316e6c8e8774d19c15de4428bf13a838f.jpg b/data/2025/2504_10xxx/2504.10686/images/a2c01cfe6e304254748277a8f823d85316e6c8e8774d19c15de4428bf13a838f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c1f549ee184003a71fd9abd8f682048f0b9080bf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/a2c01cfe6e304254748277a8f823d85316e6c8e8774d19c15de4428bf13a838f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd1925ed6e32122e9cd82098bdda232f37b70b4ce63e28dd5038c99c72a00a1c +size 31373 diff --git a/data/2025/2504_10xxx/2504.10686/images/a34fd9595397439c984a401aa9617a0634a85b8e638fa7ea12403f01e0a3c2f6.jpg b/data/2025/2504_10xxx/2504.10686/images/a34fd9595397439c984a401aa9617a0634a85b8e638fa7ea12403f01e0a3c2f6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8aff48108e48725c2724788780138892c18653d4 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10686/images/a34fd9595397439c984a401aa9617a0634a85b8e638fa7ea12403f01e0a3c2f6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d27c780d4002ff848c8f2eaf32ff486d89c3947876110a6d535a5b87236d82b4 +size 64413 diff --git a/data/2025/2504_10xxx/2504.10686/images/aad7982650cb376bfab88d1f41e6514c27a63d4ddf9e751f866833b7ce6411d9.jpg b/data/2025/2504_10xxx/2504.10686/images/aad7982650cb376bfab88d1f41e6514c27a63d4ddf9e751f866833b7ce6411d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73dd5c41cdfeb14053d18d6278e84c5a8760c770 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/aad7982650cb376bfab88d1f41e6514c27a63d4ddf9e751f866833b7ce6411d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec86fb21eeffa15420c9afe50e0b7958619011c376dad0bb42d86b61dab7ab45 +size 39654 diff --git a/data/2025/2504_10xxx/2504.10686/images/acb7e256b36e27fbb9227c2c97f2747745796ad01d4a69839743b8c4c6ab22db.jpg b/data/2025/2504_10xxx/2504.10686/images/acb7e256b36e27fbb9227c2c97f2747745796ad01d4a69839743b8c4c6ab22db.jpg new file mode 100644 index 0000000000000000000000000000000000000000..77bc8765da74f712e2417b9c45c8a72f4b2f1017 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/acb7e256b36e27fbb9227c2c97f2747745796ad01d4a69839743b8c4c6ab22db.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a02786fac0d323f01c5e7f13c41398a1f027b1028fd8d8064eb62c344ec010a +size 12560 diff --git a/data/2025/2504_10xxx/2504.10686/images/aeb111111f6a66fd1e33711c57d02e8b37f987757aecc478ba83f7f117f8f563.jpg b/data/2025/2504_10xxx/2504.10686/images/aeb111111f6a66fd1e33711c57d02e8b37f987757aecc478ba83f7f117f8f563.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f3c1e2ceef9148b741ed26a64ed889f115f7652e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/aeb111111f6a66fd1e33711c57d02e8b37f987757aecc478ba83f7f117f8f563.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:1fc26ad7fd86f7ab1f40dc6936366738e51a0ecc9771d2ed3cb1a410cb80a136 +size 32766 diff --git a/data/2025/2504_10xxx/2504.10686/images/b0e310266127b1fd1ba2aeb6da985df30d147f9c916beb447053487d3ee36743.jpg b/data/2025/2504_10xxx/2504.10686/images/b0e310266127b1fd1ba2aeb6da985df30d147f9c916beb447053487d3ee36743.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eafa1ab8a7b93cd6c245d07746a7aaf3b82f126e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/b0e310266127b1fd1ba2aeb6da985df30d147f9c916beb447053487d3ee36743.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf23460ec2b9a7a82ee2b8f14b689c839500b62a38195c0f0929eb5a54416fcf +size 10374 diff --git a/data/2025/2504_10xxx/2504.10686/images/b1af22d432546be3d58f726de7b0d76a5692472560a730d2902ecb22dbc465ac.jpg b/data/2025/2504_10xxx/2504.10686/images/b1af22d432546be3d58f726de7b0d76a5692472560a730d2902ecb22dbc465ac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b87d36b1f152d83adabe98c58ab2b69b11a86eb1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/b1af22d432546be3d58f726de7b0d76a5692472560a730d2902ecb22dbc465ac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80e4b7fd08e1fc5ea30dbba28459bf00bbf6aa294a6b08008dcb900064e7b959 +size 27114 diff --git a/data/2025/2504_10xxx/2504.10686/images/b2dafe71e9e3063b6f4e6e0d7fe0c81118d087eca6c5fbfdf98428625d1d76de.jpg b/data/2025/2504_10xxx/2504.10686/images/b2dafe71e9e3063b6f4e6e0d7fe0c81118d087eca6c5fbfdf98428625d1d76de.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a01e6360617840848761be8793b6d80d863dcd2c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/b2dafe71e9e3063b6f4e6e0d7fe0c81118d087eca6c5fbfdf98428625d1d76de.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e7e81eb9259602ab0234acc663284c404a06091eee2ec94b55c2d1200300f7a +size 28022 diff --git 
a/data/2025/2504_10xxx/2504.10686/images/b48235d2ae7a52f0f87af13e385c348076ea1d6bd0051f75995284647bdfd624.jpg b/data/2025/2504_10xxx/2504.10686/images/b48235d2ae7a52f0f87af13e385c348076ea1d6bd0051f75995284647bdfd624.jpg new file mode 100644 index 0000000000000000000000000000000000000000..de348da6ffd01ea22351cdf225dfcc4df3d44d5f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/b48235d2ae7a52f0f87af13e385c348076ea1d6bd0051f75995284647bdfd624.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f98d078cbc702a6726f0110723d52cfcd6c92249134957390ae420494f5cb3e +size 13823 diff --git a/data/2025/2504_10xxx/2504.10686/images/b758339bd61eafff580164a4527468d8f542327ebe7a1f14835a8397e41c3165.jpg b/data/2025/2504_10xxx/2504.10686/images/b758339bd61eafff580164a4527468d8f542327ebe7a1f14835a8397e41c3165.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d145917e787b145f4db6375ef45f7833ffc542db --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/b758339bd61eafff580164a4527468d8f542327ebe7a1f14835a8397e41c3165.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad0293f638761fca0df38a6ef78d14120e4d89d3d771c01888ad793129fb7d9d +size 10669 diff --git a/data/2025/2504_10xxx/2504.10686/images/bd994d0588e6812fed53823ee522107759f02c0a3d97cd1a32939c53e159da5f.jpg b/data/2025/2504_10xxx/2504.10686/images/bd994d0588e6812fed53823ee522107759f02c0a3d97cd1a32939c53e159da5f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c81f5e0faf2568b98f426cf9f3318fb97350f93c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/bd994d0588e6812fed53823ee522107759f02c0a3d97cd1a32939c53e159da5f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:042863ff4ba8ab675df722c67a354c76826f8138f2941cc2bcd4e0d25516aa1b +size 33107 diff --git a/data/2025/2504_10xxx/2504.10686/images/bdc0342fa5b6993dbb6a400dc2c09954c71f33a178c369230a05d096b5bac363.jpg 
b/data/2025/2504_10xxx/2504.10686/images/bdc0342fa5b6993dbb6a400dc2c09954c71f33a178c369230a05d096b5bac363.jpg new file mode 100644 index 0000000000000000000000000000000000000000..abba7e57af15dcfba86cd50a202d71937f1763b9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/bdc0342fa5b6993dbb6a400dc2c09954c71f33a178c369230a05d096b5bac363.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6aff314f5464d9818b142e0767682264049be77bf41fae10dc2f2e3f0c3f681c +size 3302 diff --git a/data/2025/2504_10xxx/2504.10686/images/c2192d342d64556a1a2860e014da3529e5c2dc30bbd7246a3a3f93bc4b03ebb7.jpg b/data/2025/2504_10xxx/2504.10686/images/c2192d342d64556a1a2860e014da3529e5c2dc30bbd7246a3a3f93bc4b03ebb7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ee2a664daa0f2d0c681a50a7f0d09c01d6fee25 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/c2192d342d64556a1a2860e014da3529e5c2dc30bbd7246a3a3f93bc4b03ebb7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:797f92e59fe454fe8060065b434259b044f69ebf0750faaec58f8cfbefba288c +size 27465 diff --git a/data/2025/2504_10xxx/2504.10686/images/c40b65ab994e2395bc7a92b4cda211ba91920ed236cf7a59b3f243698618b855.jpg b/data/2025/2504_10xxx/2504.10686/images/c40b65ab994e2395bc7a92b4cda211ba91920ed236cf7a59b3f243698618b855.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f42978207adc88aa2d58a6b1205d0599bd5b8b45 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/c40b65ab994e2395bc7a92b4cda211ba91920ed236cf7a59b3f243698618b855.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:882835e06982d6164f26cddf007c0323ecf1c564929bed681f0802fd522ab8b7 +size 47003 diff --git a/data/2025/2504_10xxx/2504.10686/images/c451da8a344672c77b071621cc948448afa145e2ceeb268b5c838603f9423ea7.jpg b/data/2025/2504_10xxx/2504.10686/images/c451da8a344672c77b071621cc948448afa145e2ceeb268b5c838603f9423ea7.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..e1b48487084a9da5e7c9cb9ecf49b0799ff4c8eb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/c451da8a344672c77b071621cc948448afa145e2ceeb268b5c838603f9423ea7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6618ebe1f2f0a1359a0ad88b746cea39f81f842ee83b5bdd845574b49b04854d +size 4716 diff --git a/data/2025/2504_10xxx/2504.10686/images/cb4d48375367e6c3831213b678c17fc76e97295cb24d5c23662571f9e2f19896.jpg b/data/2025/2504_10xxx/2504.10686/images/cb4d48375367e6c3831213b678c17fc76e97295cb24d5c23662571f9e2f19896.jpg new file mode 100644 index 0000000000000000000000000000000000000000..220afe165200998a0d62aa9c1cc240f626a206e6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/cb4d48375367e6c3831213b678c17fc76e97295cb24d5c23662571f9e2f19896.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe76adaf33cc9325ba5d4397f05ede2d37cb45470fb41a40e0ad1f0a0f2657e7 +size 20367 diff --git a/data/2025/2504_10xxx/2504.10686/images/cba831f90e681281e70d07836d242412ebf29f6e98714494d5a04829c493e39c.jpg b/data/2025/2504_10xxx/2504.10686/images/cba831f90e681281e70d07836d242412ebf29f6e98714494d5a04829c493e39c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6565695d48ed33484afeb6dde0549e947b45ee66 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/cba831f90e681281e70d07836d242412ebf29f6e98714494d5a04829c493e39c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c6c1818804ab43d9190b0b71760f883698b052f08ad39a4db4b1905834b9f4c +size 22701 diff --git a/data/2025/2504_10xxx/2504.10686/images/cd71cb66b2605332b1a6f6ce4dc15f144a84d5fa912a642fd7c10063bb2be48b.jpg b/data/2025/2504_10xxx/2504.10686/images/cd71cb66b2605332b1a6f6ce4dc15f144a84d5fa912a642fd7c10063bb2be48b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0cf8348cdb926b02be37c2d9362133ee9eb418d4 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10686/images/cd71cb66b2605332b1a6f6ce4dc15f144a84d5fa912a642fd7c10063bb2be48b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87769b39418b529e1c0b01550bbe48f9386a847d353be634c48f64c964adbc0e +size 34014 diff --git a/data/2025/2504_10xxx/2504.10686/images/ce83c12967ac731688262fcbc99d860413edf99aaafa7cd0270031e44871e97d.jpg b/data/2025/2504_10xxx/2504.10686/images/ce83c12967ac731688262fcbc99d860413edf99aaafa7cd0270031e44871e97d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..de17ef0b20ab71266f46e4d1ec6ab1c2ae22a789 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/ce83c12967ac731688262fcbc99d860413edf99aaafa7cd0270031e44871e97d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d22f3eee65b1aed7a9a67ca5e33900e01e2e3ce9f80d50fe68c64d36a34207ab +size 3767 diff --git a/data/2025/2504_10xxx/2504.10686/images/d0ea7d9b2ecaf56aa13a4f1b039a13ff28f10a58d64cc690912ef0f4e806004e.jpg b/data/2025/2504_10xxx/2504.10686/images/d0ea7d9b2ecaf56aa13a4f1b039a13ff28f10a58d64cc690912ef0f4e806004e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee206b497b675c11f73ff025247f318397730240 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/d0ea7d9b2ecaf56aa13a4f1b039a13ff28f10a58d64cc690912ef0f4e806004e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4a314ef7a1e4bd1aa87098a1d7e7c487f19294e532c22ff6c615f40a96fa31b +size 14711 diff --git a/data/2025/2504_10xxx/2504.10686/images/d48024e2b80356cc2faa2c14f1af7be0760ff89822241d890034e1dceb40e2d0.jpg b/data/2025/2504_10xxx/2504.10686/images/d48024e2b80356cc2faa2c14f1af7be0760ff89822241d890034e1dceb40e2d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4da4145f14a2125bc2c70b0ca4e2fbf8b4896ebc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/d48024e2b80356cc2faa2c14f1af7be0760ff89822241d890034e1dceb40e2d0.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:6a3fc070c00e66aaab52b5d1541713ea9ce57238f2d9b42375971307219a0d6f +size 74496 diff --git a/data/2025/2504_10xxx/2504.10686/images/d5280ba3c0c422f04fb814ae78615d895235df2419f06168de54ab34712b08dd.jpg b/data/2025/2504_10xxx/2504.10686/images/d5280ba3c0c422f04fb814ae78615d895235df2419f06168de54ab34712b08dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7543f695b9ce8d133305c5bb0dd0f2ae228bc298 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/d5280ba3c0c422f04fb814ae78615d895235df2419f06168de54ab34712b08dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ca3796717a8ca6f9a68fe52bfd9476635d893d513d034b197b9c5b14b6b1288 +size 14112 diff --git a/data/2025/2504_10xxx/2504.10686/images/d8575e44d65aceb2fd23b5bd961f7c359c9fc2f3717d1291c9ae893c01490dc3.jpg b/data/2025/2504_10xxx/2504.10686/images/d8575e44d65aceb2fd23b5bd961f7c359c9fc2f3717d1291c9ae893c01490dc3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c77620746ddef08139d9b204669025178ff82382 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/d8575e44d65aceb2fd23b5bd961f7c359c9fc2f3717d1291c9ae893c01490dc3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f577bb4e1cb5dd169fef665dbdb02e331211d7569ed740e48216d3df68fc8f76 +size 14188 diff --git a/data/2025/2504_10xxx/2504.10686/images/de2cac9923bfa4f52967dd4330cfae9d5dfebaa792800e8bb8bb19662aebe5ea.jpg b/data/2025/2504_10xxx/2504.10686/images/de2cac9923bfa4f52967dd4330cfae9d5dfebaa792800e8bb8bb19662aebe5ea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32d1aa081fdd2c90b0b7d68ef625079fb69ca523 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/de2cac9923bfa4f52967dd4330cfae9d5dfebaa792800e8bb8bb19662aebe5ea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5e84abeb85fa42ab6f4c84ed1a2de02fd3f1eeae1786afe8f10348c900425c9 +size 71547 diff --git 
a/data/2025/2504_10xxx/2504.10686/images/de492b7ced705d8f7a88d48385420eb72c09a6323ea94714fef518a990277b96.jpg b/data/2025/2504_10xxx/2504.10686/images/de492b7ced705d8f7a88d48385420eb72c09a6323ea94714fef518a990277b96.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aef776d61c6ad1e758c87580a2df1a74c440274e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/de492b7ced705d8f7a88d48385420eb72c09a6323ea94714fef518a990277b96.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ae15b2eb7f9218875204ee09483993f97d1935c2a9e8113d0ef2ef231ab3c1e +size 22287 diff --git a/data/2025/2504_10xxx/2504.10686/images/de74ce1b8f0633eda8ca59b948c9f77773cbcc804b10a76ea45571ce864aee05.jpg b/data/2025/2504_10xxx/2504.10686/images/de74ce1b8f0633eda8ca59b948c9f77773cbcc804b10a76ea45571ce864aee05.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0a6e18c52627ec9b5b8c52372cb094e89ef0f6ca --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/de74ce1b8f0633eda8ca59b948c9f77773cbcc804b10a76ea45571ce864aee05.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6fb9028e9c53848d9793a947ae334b04a96f2d2bf635f7c795a43eeabb2ce31 +size 3556 diff --git a/data/2025/2504_10xxx/2504.10686/images/dfc46883933c059577cb6e6eeaa96eebd866af9c6728c75dfb5d979abc1dad54.jpg b/data/2025/2504_10xxx/2504.10686/images/dfc46883933c059577cb6e6eeaa96eebd866af9c6728c75dfb5d979abc1dad54.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a83547bc17c7e8198167b7e208f63daef7095906 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/dfc46883933c059577cb6e6eeaa96eebd866af9c6728c75dfb5d979abc1dad54.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d75afe78d14551cdcd2ea617f625faab20b57126e8ca3c495ff73269b1b2cf7 +size 121885 diff --git a/data/2025/2504_10xxx/2504.10686/images/e2f754ff416b95f767e85bf2241846e8853135980f1a33ca98e7b3b2dd78f4f5.jpg 
b/data/2025/2504_10xxx/2504.10686/images/e2f754ff416b95f767e85bf2241846e8853135980f1a33ca98e7b3b2dd78f4f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..81bf395720b2530212d0e4e467b4598e107c5d25 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/e2f754ff416b95f767e85bf2241846e8853135980f1a33ca98e7b3b2dd78f4f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cdb0bd744918fa99f499635808fbadac403e8e2a283263f92e18d1d04c2bf5fa +size 15309 diff --git a/data/2025/2504_10xxx/2504.10686/images/e3eb69eb5cd620b53c2d262f2784bb41f4fb5dc9b9c3f2636d12d2ad8c5064f9.jpg b/data/2025/2504_10xxx/2504.10686/images/e3eb69eb5cd620b53c2d262f2784bb41f4fb5dc9b9c3f2636d12d2ad8c5064f9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30a5bc798b08fa4f43c640d10109cf72fab9fe0e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/e3eb69eb5cd620b53c2d262f2784bb41f4fb5dc9b9c3f2636d12d2ad8c5064f9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b229493ae55b614bd66afe86f22652426b06e572880a746f73899aa33cc862d7 +size 4501 diff --git a/data/2025/2504_10xxx/2504.10686/images/e71224c1294caaddfff3a7868d78eb34cb3399b9a3e70cfae73f0f5093a5bc12.jpg b/data/2025/2504_10xxx/2504.10686/images/e71224c1294caaddfff3a7868d78eb34cb3399b9a3e70cfae73f0f5093a5bc12.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba687d009527853ef16cc507e9f88aceba80a283 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/e71224c1294caaddfff3a7868d78eb34cb3399b9a3e70cfae73f0f5093a5bc12.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60ccf0b14d553952ecfe3506f1cfa96a984298712e64142f713a5650067f424c +size 20435 diff --git a/data/2025/2504_10xxx/2504.10686/images/e80fade4787f96534f1e2ef24e32d09dadef36238ccf578c2e38675c21631355.jpg b/data/2025/2504_10xxx/2504.10686/images/e80fade4787f96534f1e2ef24e32d09dadef36238ccf578c2e38675c21631355.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..b08b6af934cd69701daa2234fe6334665b6fdd5e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/e80fade4787f96534f1e2ef24e32d09dadef36238ccf578c2e38675c21631355.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25a47230a287a2f1ddcd4be8cb5c8be2b23acd372d07b933f3704caeae7618db +size 25023 diff --git a/data/2025/2504_10xxx/2504.10686/images/e8b1e59898d3b68eb6fd482318b1fd061e804edafc865f2b99d918a5991b38f5.jpg b/data/2025/2504_10xxx/2504.10686/images/e8b1e59898d3b68eb6fd482318b1fd061e804edafc865f2b99d918a5991b38f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..501e8c1857141235f57f0140457e8dd40e700ab2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/e8b1e59898d3b68eb6fd482318b1fd061e804edafc865f2b99d918a5991b38f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70849d4ca3aac2d072f3e00d21488944360bc45dd4f6424fcbe1ef1b0879725f +size 33616 diff --git a/data/2025/2504_10xxx/2504.10686/images/e9e1cda4695875e0b22a2ee15705c87b10f30d2b2a898093416b94ff467341a4.jpg b/data/2025/2504_10xxx/2504.10686/images/e9e1cda4695875e0b22a2ee15705c87b10f30d2b2a898093416b94ff467341a4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..78254324e0d0682c499529d2188ad1848e9b0c4d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/e9e1cda4695875e0b22a2ee15705c87b10f30d2b2a898093416b94ff467341a4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:819771a00effa7fb1fc59db9bd6d22f9a9fd8f75c705bee63636d77acbf54443 +size 50690 diff --git a/data/2025/2504_10xxx/2504.10686/images/f3d66744d65647250f814ecd1fd97932c61509acc985e03178f1944b07d12c62.jpg b/data/2025/2504_10xxx/2504.10686/images/f3d66744d65647250f814ecd1fd97932c61509acc985e03178f1944b07d12c62.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6267f192412653fcd5fbfc943ebb7f69be175b7c --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10686/images/f3d66744d65647250f814ecd1fd97932c61509acc985e03178f1944b07d12c62.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8bad8ffef01267f6419da626feffd96d413df532f76c4c74c68f7e727afa159 +size 6274 diff --git a/data/2025/2504_10xxx/2504.10686/images/f41c0d497ea15477ece5f1fc75a7c7d46d314cd74ff65e50caa4891ff1ad9ef1.jpg b/data/2025/2504_10xxx/2504.10686/images/f41c0d497ea15477ece5f1fc75a7c7d46d314cd74ff65e50caa4891ff1ad9ef1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a912fc62d31bc48f0193718dca181a327f49fba0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/f41c0d497ea15477ece5f1fc75a7c7d46d314cd74ff65e50caa4891ff1ad9ef1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d29096d601cc0b0b4be46bdeb94d8cf5ce001b3dd8f3b04386a73c42ccf88990 +size 25099 diff --git a/data/2025/2504_10xxx/2504.10686/images/f515d21064efcb5f99d823286f26e5e7ffc92eccff0a11ad3be8c813baca6d94.jpg b/data/2025/2504_10xxx/2504.10686/images/f515d21064efcb5f99d823286f26e5e7ffc92eccff0a11ad3be8c813baca6d94.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c2936490bb322786d2b7a7cfc999be2e7f04ba60 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/f515d21064efcb5f99d823286f26e5e7ffc92eccff0a11ad3be8c813baca6d94.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75e16d271e2af4146492f2db0d9dcc259a171faae650f1a5e20c206e0e9d2a36 +size 38053 diff --git a/data/2025/2504_10xxx/2504.10686/images/f7a459c9d7c6eda2acf426998d26a40c8bebbe250c1ca7ffe0217eb5634b2e71.jpg b/data/2025/2504_10xxx/2504.10686/images/f7a459c9d7c6eda2acf426998d26a40c8bebbe250c1ca7ffe0217eb5634b2e71.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b7d9611c9f89fc2d45ba12edd9c1d27eb465996c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/f7a459c9d7c6eda2acf426998d26a40c8bebbe250c1ca7ffe0217eb5634b2e71.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:3b7498776ac33aa9eeb3e955d3cb48d532588c61cbb3cd7df658d5b47dcef679 +size 100886 diff --git a/data/2025/2504_10xxx/2504.10686/images/f872827f0bbe062c88b104ed0c499d216e8b302b4a8aa46faf3793bedbc4cf18.jpg b/data/2025/2504_10xxx/2504.10686/images/f872827f0bbe062c88b104ed0c499d216e8b302b4a8aa46faf3793bedbc4cf18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..227f499c83f81b6c4626996da9f23b1c1e80cc84 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/f872827f0bbe062c88b104ed0c499d216e8b302b4a8aa46faf3793bedbc4cf18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:251bc6d1a4693a4b43697071889724235b8d57f0a42bb9c64b28c2854a5a1fa9 +size 1404 diff --git a/data/2025/2504_10xxx/2504.10686/images/f9533c63c96b88d6982d85f2095e9f195b9d8592e0275d4fb83d1ad4cc7289c3.jpg b/data/2025/2504_10xxx/2504.10686/images/f9533c63c96b88d6982d85f2095e9f195b9d8592e0275d4fb83d1ad4cc7289c3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9b322552d7fb08c17ebd32ea64cbf8e39266e619 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/f9533c63c96b88d6982d85f2095e9f195b9d8592e0275d4fb83d1ad4cc7289c3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1efc1b1df176dc41fa8bfbff9fe0965fff951ad318a4bfb3fa4dbb0c8ebf0680 +size 5959 diff --git a/data/2025/2504_10xxx/2504.10686/images/f9f256e2bdc25f83ac89a801417e8634751d782b6c46300dbea253a6df644900.jpg b/data/2025/2504_10xxx/2504.10686/images/f9f256e2bdc25f83ac89a801417e8634751d782b6c46300dbea253a6df644900.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41dfd68f3f40b6d8864c7db6d48fa482f05831f5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/f9f256e2bdc25f83ac89a801417e8634751d782b6c46300dbea253a6df644900.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5af1627734fdf93839b4986b92c8bc862f725d4a2e658ee679b67a7c8c0e5e90 +size 14584 diff --git 
a/data/2025/2504_10xxx/2504.10686/images/fa4aa7709eb7b2896ba4b086f4d265ee0a67592ca4ec36cd0bec84ce22f39b53.jpg b/data/2025/2504_10xxx/2504.10686/images/fa4aa7709eb7b2896ba4b086f4d265ee0a67592ca4ec36cd0bec84ce22f39b53.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c05e850ed8f03c383b5767eca9ee21ba6f35d45c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/fa4aa7709eb7b2896ba4b086f4d265ee0a67592ca4ec36cd0bec84ce22f39b53.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34082975a904b8810a0c33f396346450078cb2600c413955f6122889f90da13f +size 4448 diff --git a/data/2025/2504_10xxx/2504.10686/images/fb61cf2bd326fc5f88d5a3f8a510a81318ceffc6d1176cced4db306d4281519c.jpg b/data/2025/2504_10xxx/2504.10686/images/fb61cf2bd326fc5f88d5a3f8a510a81318ceffc6d1176cced4db306d4281519c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..356b4e674a74dd4f641c495a3f2907d15c6cc8d2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/fb61cf2bd326fc5f88d5a3f8a510a81318ceffc6d1176cced4db306d4281519c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3cfb4880109ac4053793e61f5fa465f0ae8d67eae53405377393595737a964be +size 6001 diff --git a/data/2025/2504_10xxx/2504.10686/images/fb65ecdd10ff6c4921517e12c627c869473455c9ffd3e49a76b9808f00709b64.jpg b/data/2025/2504_10xxx/2504.10686/images/fb65ecdd10ff6c4921517e12c627c869473455c9ffd3e49a76b9808f00709b64.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b2f0e6ce3a750acab1e898ff26bf632ed3f7cf3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/fb65ecdd10ff6c4921517e12c627c869473455c9ffd3e49a76b9808f00709b64.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba03ca4c0f36f613e48753a38d3be294642c8255419bcdd0072e84916d42c415 +size 3692 diff --git a/data/2025/2504_10xxx/2504.10686/images/ffa156e79bd432d8297ee8fb75d976bc2fd29b0015d9167843692ed720a5bacd.jpg 
b/data/2025/2504_10xxx/2504.10686/images/ffa156e79bd432d8297ee8fb75d976bc2fd29b0015d9167843692ed720a5bacd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1dc2e680f6e86e2e66a19fe475338c1a507e6375 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/ffa156e79bd432d8297ee8fb75d976bc2fd29b0015d9167843692ed720a5bacd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b32ae4bd4ca73b2607cc967c3c246930adab74f51b490fbe1e77f70765ebd5fb +size 1816 diff --git a/data/2025/2504_10xxx/2504.10686/images/ffaeb29db7fde13f35fcbf1cb39fb49d6fad1f29499bad6f15b795904e69fbdc.jpg b/data/2025/2504_10xxx/2504.10686/images/ffaeb29db7fde13f35fcbf1cb39fb49d6fad1f29499bad6f15b795904e69fbdc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..164e7dd4c9f663fcd39a55f5f41415362330559d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/images/ffaeb29db7fde13f35fcbf1cb39fb49d6fad1f29499bad6f15b795904e69fbdc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44315bf794c4286ecd40e0846d24a93d35359feed487e9d9bbc65f018cfd32c8 +size 20425 diff --git a/data/2025/2504_10xxx/2504.10686/layout.json b/data/2025/2504_10xxx/2504.10686/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..7d0d30503c48d32c9dfe6ad08537d23af5eb0f70 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10686/layout.json @@ -0,0 +1,46862 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 93, + 103, + 516, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 103, + 516, + 120 + ], + "spans": [ + { + "bbox": [ + 93, + 103, + 516, + 120 + ], + "type": "text", + "content": "The Tenth NTIRE 2025 Efficient Super-Resolution Challenge Report" + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 63, + 142, + 545, + 521 + ], + "blocks": [ + { + "bbox": [ + 63, + 142, + 545, + 521 + ], + "lines": [ + { + "bbox": [ + 63, + 142, + 545, + 521 + ], + "spans": [ + { + "bbox": 
[ + 63, + 142, + 545, + 521 + ], + "type": "table", + "html": "
Bin Ren*Hang Guo*Lei Sun*Zongwei Wu*Radu Timofte*Yawei Li*
Yao ZhangXinning ChaiZhengxue ChengYingsheng QinYucai Yang
Li SongHongyuan YuPufan XuCheng WanZhijuan HuangPeng Guo
Shuyuan CuiChenjun LiXuehai HuPan PanXin ZhangHeng Zhang
Qing LuoLinyan JiangHaibo LeiQifang GaoYaqing LiWeihua Luo
Tsing LiQing WangYi LiuYang WangHongyu AnLiou Zhang
Shijie ZhaoLianhong SongLong SunJinshan PanJiangxin DongJinhui Tang
Jing WeiMengyang WangRuilong GuoQian WangQingliang Liu
Yang ChengDavinciEnxuan GuPinxin LiuYongsheng YuHang Hua
Yunlong TangShihao WangYukun YangZhiyu ZhangYukun YangJiyu Wu
Jiancheng HuangYifan LiuYi HuangShifeng ChenRui ChenYi Feng
Mingxi LiCailu WanXiangji WuZibin LiuJinyang ZhongKihwan Yoon
Ganzorig GankhuyagShengyun ZhongMingyang WuRenjie LiYushen Zuo
Zhengzhong TuZongang GaoGuannan ChenYuan TianWenhui Chen
Weijun YuanZhan LiYihang ChenYifan DengRuting DengYilin Zhang
Huan ZhengYanyan WeiWenxuan ZhaoSuiyi ZhaoFei WangKun Li
Yinggan TangMengjie SuJae-hyeon LeeDong-Hyeop SonUi-Jin Choi
Tiancheng ShaoYuqing ZhangMengcheng MaDonggeun KoYoungsang Kwak
Jiun LeeJaehwa KwakYuxuan JiangQiang ZhuSiyue TengFan Zhang
Shuyuan ZhuBing ZengDavid BullJing HuHui DengXuan Zhang
Lin ZhuQinrui FanWeijian DengJunnan WuWenqin DengYuquan Liu
Zhaohong XuJameer Babu PinjariKuldeep PurohitZeyu XiaoZhuoyuan Li
Surya VashisthAkshay DudhanePraful HambardeSachin Chaudhary
Satya Naryan TaziPrashant PatilSantosh Kumar VipparthiSubrahmanyam Murala
Wei-Chen ShenI-Hsiang ChenYunzhe XuChen ZhaoZhizhou Chen
Akram Khatami-RiziAhmad Mahmoudi-AznavehAlejandro MerinoBruno Longarela
Javier AbadMarcos V. CondeSimone BiancoLuca CogoGianmarco Corti
", + "image_path": "9b50cbffaa73a484b07fe9e673836900b5b744821aa2f3a6b5870cf1b1837401.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 152, + 548, + 199, + 559 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 548, + 199, + 559 + ], + "spans": [ + { + "bbox": [ + 152, + 548, + 199, + 559 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 573, + 294, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 573, + 294, + 597 + ], + "spans": [ + { + "bbox": [ + 55, + 573, + 294, + 597 + ], + "type": "text", + "content": "This paper presents a comprehensive review of the NTIRE 2025 Challenge on Single-Image Efficient Super-Resolution" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 548, + 555, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 548, + 555, + 681 + ], + "spans": [ + { + "bbox": [ + 313, + 548, + 555, + 681 + ], + "type": "text", + "content": "(ESR). The challenge aimed to advance the development of deep models that optimize key computational metrics, i.e., runtime, parameters, and FLOPs, while achieving a PSNR of at least 26.90 dB on the DIV2K_LSDIR_valid dataset and 26.99 dB on the DIV2K_LSDIR_test dataset. A robust participation saw 244 registered entrants, with 43 teams submitting valid entries. This report meticulously analyzes these methods and results, emphasizing groundbreaking advancements in state-of-the-art single-image ESR techniques. The analysis highlights innovative approaches and establishes benchmarks for future research in the field." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 208, + 37, + 558 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 208, + 37, + 558 + ], + "spans": [ + { + "bbox": [ + 14, + 208, + 37, + 558 + ], + "type": "text", + "content": "arXiv:2504.10686v1 [cs.CV] 14 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 608, + 295, + 685 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 608, + 295, + 685 + ], + "spans": [ + { + "bbox": [ + 55, + 608, + 295, + 685 + ], + "type": "text", + "content": "* B. Ren (bin.ren@unitn.it, University of Pisa & University of Trento, Italy), H. Guo (cshguo@gmail.com, Tsinghua University), L. Sun (lei.sun@insait.ai,INSAIT, Sofia University\"St. Kliment Ohridski\"), Z. Wu (zongwei.wu@uni-wuerzburg.de, University of Würzburg, Germany), R. Timofte (Radu.Timofte@uni-wuerzburg.de, University of Würzburg, Germany), and Y. Li (yawei.li@vision.ee.ethz.ch, ETH Zürich, Switzerland) were the challenge organizers, while the other authors participated in the challenge." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 685, + 238, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 685, + 238, + 693 + ], + "spans": [ + { + "bbox": [ + 56, + 685, + 238, + 693 + ], + "type": "text", + "content": "Appendix A contains the authors' teams and affiliations." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 694, + 280, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 694, + 280, + 703 + ], + "spans": [ + { + "bbox": [ + 56, + 694, + 280, + 703 + ], + "type": "text", + "content": "NTIRE 2025 webpage: https://cvslai.net/ntire/2025/." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 704, + 292, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 704, + 292, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 704, + 292, + 712 + ], + "type": "text", + "content": "Code: https://github.com/Amazingren/NTIRE2025_ESR/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 733, + 307, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 307, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 307, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 136, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 136, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 136, + 83 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 294, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 294, + 235 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 294, + 235 + ], + "type": "text", + "content": "Single image super-resolution (SR) is designed to reconstruct a high-resolution (HR) image from a single low-resolution (LR) image, typically affected by blurring and down-sampling. The standard degradation model in traditional SR, bicubic down-sampling, allows for consistent benchmarks and systematic comparisons among different SR methods. This framework also serves as a platform to highlight the advances in SR technologies. SR techniques are widely used in fields such as satellite imaging, medical image enhancement, and surveillance, where improved image quality is essential for accurate interpretation and analysis." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 235, + 295, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 235, + 295, + 390 + ], + "spans": [ + { + "bbox": [ + 55, + 235, + 295, + 390 + ], + "type": "text", + "content": "State-of-the-art deep neural networks for image superresolution (SR) often suffer from overparameterization, intensive computation, and high latency, making their deployment on mobile devices for real-time SR applications challenging. To address these limitations, extensive research has focused on improving network efficiency through techniques such as network pruning, low-rank filter decomposition, network quantization, neural architecture search, state space modeling, diffusion priors, and knowledge distillation [76, 79, 89, 90, 129, 143, 146, 148]. These compression methods, successfully applied to image SR, optimize both the computational footprint and the operational speed [8, 91, 123]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 390, + 295, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 390, + 295, + 569 + ], + "spans": [ + { + "bbox": [ + 55, + 390, + 295, + 569 + ], + "type": "text", + "content": "Efficient SR is particularly crucial for edge computing and mobile devices, where processing power, energy availability, and memory are limited. The enhanced efficiency of SR models ensures that these devices can execute high-quality image processing in real-time without exhausting system resources or draining battery life rapidly. Metrics like runtime, parameter count, and computational complexity (FLOPs) are vital for assessing the suitability of SR models for edge deployment. These parameters are key in maintaining a balance between performance and resource use, ensuring that mobile devices can deliver advanced imaging capabilities efficiently. 
This balance is critical for the widespread adoption of advanced SR techniques in everyday applications, driving the development of AI-enabled technologies that are both powerful and accessible." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 570, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 570, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 570, + 295, + 713 + ], + "type": "text", + "content": "In collaboration with the 2025 New Trends in Image Restoration and Enhancement (NTIRE 2025) workshop, we organize the challenge on single-image efficient superresolution. The challenge's goal is to super-resolve an LR image with a magnification factor of " + }, + { + "bbox": [ + 55, + 570, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\times 4" + }, + { + "bbox": [ + 55, + 570, + 295, + 713 + ], + "type": "text", + "content": " using a network that reduces aspects such as runtime, parameters, FLOPs, of EFDN [116], while at least maintaining the " + }, + { + "bbox": [ + 55, + 570, + 295, + 713 + ], + "type": "inline_equation", + "content": "26.90~\\mathrm{dB}" + }, + { + "bbox": [ + 55, + 570, + 295, + 713 + ], + "type": "text", + "content": " on the DIV2K_LSDIR_valid dataset, and " + }, + { + "bbox": [ + 55, + 570, + 295, + 713 + ], + "type": "inline_equation", + "content": "26.99\\mathrm{dB}" + }, + { + "bbox": [ + 55, + 570, + 295, + 713 + ], + "type": "text", + "content": " on the DIV2K_LSDIR_test dataset. This challenge aims to discover advanced and innovative solutions for efficient SR, benchmark their efficiency, and identify general trends for the design of future efficient SR networks." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 72, + 553, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 288 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 288 + ], + "type": "text", + "content": "This challenge is one of the NTIRE 2025 Workshop associated challenges on: ambient lighting normalization [106], reflection removal in the wild [125], shadow removal [105], event-based image deblurring [97], image denoising [98], XGC quality assessment [74], UGC video enhancement [93], night photography rendering [28], image super-resolution (x4) [12], real-world face restoration [13], efficient super-resolution [92], HR depth estimation [130], efficient burst HDR and restoration [58], cross-domain few-shot object detection [29], short-form UGC video quality assessment and enhancement [62, 63], text to image generation model quality assessment [36], day and night rain-drop removal for dual-focused images [61], video quality assessment for video conferencing [47], low light image enhancement [75], light field super-resolution [121], restore any image model (RAIM) in the wild [68], raw restoration and super-resolution [16] and raw reconstruction from RGB on smartphones [17]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 308, + 555, + 337 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 308, + 555, + 337 + ], + "spans": [ + { + "bbox": [ + 313, + 308, + 555, + 337 + ], + "type": "text", + "content": "2. 
NTIRE 2025 Efficient Super-Resolution Challenge" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 345, + 555, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 345, + 555, + 430 + ], + "spans": [ + { + "bbox": [ + 313, + 345, + 555, + 430 + ], + "type": "text", + "content": "The goals of this challenge include: (i) promoting research in the area of single-imae efficient super-resolution, (ii) facilitating comparisons between the efficiency of various methods, and (iii) providing a platform for academic and industrial participants to engage, discuss, and potentially establish collaborations. This section delves into the specifics of the challenge." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 446, + 373, + 458 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 446, + 373, + 458 + ], + "spans": [ + { + "bbox": [ + 313, + 446, + 373, + 458 + ], + "type": "text", + "content": "2.1. Dataset" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 467, + 553, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 467, + 553, + 683 + ], + "spans": [ + { + "bbox": [ + 313, + 467, + 553, + 683 + ], + "type": "text", + "content": "The DIV2K [4] dataset and LSDIR [64] dataset are utilized for this challenge. The DIV2K dataset consists of 1,000 diverse 2K resolution RGB images, which are split into a training set of 800 images, a validation set of 100 images, and a test set of 100 images. The LSDIR dataset contains 86,991 high-resolution high-quality images, which are split into a training set of 84,991 images, a validation set of 1,000 images, and a test set of 1,000 images. 
In this challenge, the corresponding LR DIV2K images are generated by bicubic downsampling with a down-scaling factor of " + }, + { + "bbox": [ + 313, + 467, + 553, + 683 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 313, + 467, + 553, + 683 + ], + "type": "text", + "content": ". The training images from DIV2K and LSDIR are provided to the participants of the challenge. During the validation phase, 100 images from the DIV2K validation set and 100 images from the LSDIR validation set are made available to participants. During the test phase, 100 images from the DIV2K test set and another 100 images from the LSDIR test set are used. Throughout the entire challenge, the testing HR images remain hidden from the participants." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 329, + 703, + 489, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 703, + 489, + 712 + ], + "spans": [ + { + "bbox": [ + 329, + 703, + 489, + 712 + ], + "type": "text", + "content": "https://www.cvlai.net/ntire/2025/" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 183, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 183, + 83 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 183, + 83 + ], + "type": "text", + "content": "2.2. 
EFDN Baseline Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 90, + 296, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 90, + 296, + 162 + ], + "spans": [ + { + "bbox": [ + 55, + 90, + 296, + 162 + ], + "type": "text", + "content": "The Edge-Enhanced Feature Distillation Network (EFDN) [116] serves as the baseline model in this challenge. The aim is to improve its efficiency in terms of runtime, number of parameters, and FLOPs, while at least maintaining 26.90 dB on the DIV2K_LSDIR_valid dataset and 26.99 dB on the DIV2K_LSDIR_test dataset." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 163, + 296, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 163, + 296, + 376 + ], + "spans": [ + { + "bbox": [ + 55, + 163, + 296, + 376 + ], + "type": "text", + "content": "The main idea within EFDN is a combination of block composing, architecture searching, and loss designing to obtain a trade-off between performance and lightweighting. Especially, For block composing, EFDN sum up the re-parameterization methods [20, 21, 138] and designs a more effective and complex edge-enhanced diverse branch block. In detail, they employ several reasonable reparameterizable branches to enhance the structural information extraction, and then they integrate them into a vanilla convolution to maintain the inference performance. To ensure the effective optimization of parallel branches in EDBB, they designed an edge-enhanced gradient-variance loss (EG) based on the gradient-variance loss [1]. The proposed loss enforces minimizing the difference between the computed variance maps, which is helpful to restore sharper edges. The gradient maps calculated by different filters and the corresponding EG loss. In addition, the NAS strategy of DLSR is adopted to search for a robust backbone." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 379, + 296, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 379, + 296, + 533 + ], + "spans": [ + { + "bbox": [ + 55, + 379, + 296, + 533 + ], + "type": "text", + "content": "The baseline EFDN emerges as the 1st place for the overall performance of the NTIRE2023 Efficient SR Challenge [116]. The quantitative performance and efficiency metrics of EFDN are summarized as follows: (1) The number of parameters is " + }, + { + "bbox": [ + 55, + 379, + 296, + 533 + ], + "type": "inline_equation", + "content": "0.276\\mathrm{M}" + }, + { + "bbox": [ + 55, + 379, + 296, + 533 + ], + "type": "text", + "content": ". (2) The average PSNRs on validation (DIV2K 100 valid images and LSDIR 100 valid images) and testing (DIV2K 100 test images and LSDIR 100 test images) sets of this challenge are 26.93 dB and 27.01 dB, respectively. (3) The runtime averaged to 22.18ms on the validation and test set with PyTorch " + }, + { + "bbox": [ + 55, + 379, + 296, + 533 + ], + "type": "inline_equation", + "content": "2.0.0 + \\mathrm{cu}118" + }, + { + "bbox": [ + 55, + 379, + 296, + 533 + ], + "type": "text", + "content": ", and a single NVIDIA RTX A6000 GPU. (4) The number of FLOPs for an input of size " + }, + { + "bbox": [ + 55, + 379, + 296, + 533 + ], + "type": "inline_equation", + "content": "256\\times 256" + }, + { + "bbox": [ + 55, + 379, + 296, + 533 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 55, + 379, + 296, + 533 + ], + "type": "inline_equation", + "content": "16.70\\mathrm{G}" + }, + { + "bbox": [ + 55, + 379, + 296, + 533 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 544, + 192, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 544, + 192, + 556 + ], + "spans": [ + { + "bbox": [ + 55, + 544, + 192, + 556 + ], + "type": "text", + "content": "2.3. 
Tracks and Competition" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 562, + 295, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 562, + 295, + 621 + ], + "spans": [ + { + "bbox": [ + 55, + 562, + 295, + 621 + ], + "type": "text", + "content": "The aim of this challenge is to devise a network that reduces one or several aspects such as runtime, parameters, and FLOPs, while at least maintaining the 26.90 dB on the DIV2K_LSDIR valid dataset, and 26.99 dB on the DIV2K_LSDIR test dataset." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 629, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 629, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 629, + 296, + 714 + ], + "type": "text", + "content": "Challenge phases: (1) Development and validation phase: Participants were given access to 800 LR/HR training image pairs and 200 LR/HR validation image pairs from the DIV2K and the LSDIR datasets. An additional 84,991 LR/HR training image pairs from the LSDIR dataset are also provided to the participants. The EFDN model, pretrained parameters, and validation demo script are available" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 555, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 264 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 264 + ], + "type": "text", + "content": "on GitHub https://github.com/Amazingren/NTIRE2025_ESR, allowing participants to benchmark their models' runtime on their systems. Participants could upload their HR validation results to the evaluation server to calculate the PSNR of the super-resolved image produced by their models and receive immediate feedback. The corresponding number of parameters, FLOPs, and runtime will be computed by the participants. 
(2) Testing phase: In the final test phase, participants were granted access to 100 LR testing images from DIV2K and 100 LR testing images from LSDIR, while the HR ground-truth images remained hidden. Participants submitted their super-resolved results to the Codalab evaluation server and emailed the code and factsheet to the organizers. The organizers verified and ran the provided code to obtain the final results, which were then shared with participants at the end of the challenge." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 277, + 556, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 277, + 556, + 553 + ], + "spans": [ + { + "bbox": [ + 313, + 277, + 556, + 553 + ], + "type": "text", + "content": "Evaluation protocol: Quantitative evaluation metrics included validation and testing PSNRs, runtime, FLOPs, and the number of parameters during inference. PSNR was measured by discarding a 4-pixel boundary around the images. The average runtime during inference was computed on the 200 LR validation images and the 200 LR testing images. The average runtime on the validation and testing sets served as the final runtime indicator. FLOPs are evaluated on an input image of size " + }, + { + "bbox": [ + 313, + 277, + 556, + 553 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 277, + 556, + 553 + ], + "type": "text", + "content": ". Among these metrics, runtime was considered the most important. Participants were required to maintain a PSNR of at least 26.90 dB on the DIV2K_LSDIR valid dataset, and 26.99 dB on the DIV2K_LSDIR test dataset during the challenge. The constraint on the testing set helped prevent overfitting on the validation set. It's important to highlight that methods with a PSNR below the specified threshold (i.e., 26.90 dB on DIV2K_LSDIR_valid and, 26.99 dB on DIV2K_LSDIR_test) will not be considered for the subsequent ranking process. 
It is essential to meet the minimum PSNR requirement to be eligible for further evaluation and ranking. A code example for calculating these metrics is available at https://github.com/Amazingren/NTIRE2025_ESR." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 555, + 556, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 555, + 556, + 651 + ], + "spans": [ + { + "bbox": [ + 313, + 555, + 556, + 651 + ], + "type": "text", + "content": "To better quantify the rankings, we followed the scoring function from NTIRE2024 ESR [91] for three evaluation metrics in this challenge: runtime, FLOPs, and parameters. This scoring aims to convert the performance of each metric into corresponding scores to make the rankings more significant. Especially, the score for each separate metric (i.e., Runtime, FLOPs, and parameter) for each sub-track is calculated as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 342, + 663, + 555, + 689 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 342, + 663, + 555, + 689 + ], + "spans": [ + { + "bbox": [ + 342, + 663, + 555, + 689 + ], + "type": "interline_equation", + "content": "\\text {S c o r e} _ {\\text {M e t r i c}} = \\frac {\\operatorname {E x p} (2 \\times \\operatorname {M e t r i c} _ {\\text {T e a m X}})}{\\operatorname {M e t r i c} _ {\\text {B a s e l i n e}}}, \\tag {1}", + "image_path": "2f6ed61f123f2e629cc3e4c864db9d8028dac5dc9b39c42245ef1a5b9060f050.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 701, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 701, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 701, + 555, + 713 + ], + "type": "text", + "content": "based on the score of each metric, the final score used for" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 181, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 181, + 83 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 181, + 83 + ], + "type": "text", + "content": "the main track is calculated as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 94, + 87, + 294, + 129 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 87, + 294, + 129 + ], + "spans": [ + { + "bbox": [ + 94, + 87, + 294, + 129 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\text {S c o r e} = w _ {1} \\times \\text {S c o r e} \\\\ + w _ {2} \\times S c o r e \\_ F L O P s \\tag {2} \\\\ + w _ {3} \\times S c o r e \\_ P a r a m s, \\\\ \\end{array}", + "image_path": "23878695d7788854da19099b7b55349d915b8272fd39b89acbe3ce96ec10141f.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 134, + 295, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 134, + 295, + 183 + ], + "spans": [ + { + "bbox": [ + 55, + 134, + 295, + 183 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 134, + 295, + 183 + ], + "type": "inline_equation", + "content": "w_{1}, w_{2}" + }, + { + "bbox": [ + 55, + 134, + 295, + 183 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 55, + 134, + 295, + 183 + ], + "type": "inline_equation", + "content": "w_{3}" + }, + { + "bbox": [ + 55, + 134, + 295, + 183 + ], + "type": "text", + "content": " are set to 0.7, 0.15, and 0.15, respectively. This setting is intended to incentivize participants to design a method that prioritizes speed efficiency while maintaining a reasonable model complexity." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 191, + 162, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 191, + 162, + 205 + ], + "spans": [ + { + "bbox": [ + 55, + 191, + 162, + 205 + ], + "type": "text", + "content": "3. Challenge Results" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 211, + 295, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 211, + 295, + 354 + ], + "spans": [ + { + "bbox": [ + 55, + 211, + 295, + 354 + ], + "type": "text", + "content": "The final challenge results and the corresponding rankings are presented in Tab. 1 The table also includes the baseline method EFDN [116] for comparison. In Sec.4, the methods evaluated in Tab. 1 are briefly explained, while the team members are listed in A. The performance of different methods is compared from four different perspectives, including the runtime, FLOPs, the parameters, and the overall performance. Furthermore, in order to promote a fair competition emphasizing efficiency, the criteria for image reconstruction quality in terms of test PSNR are set to 26.90 and 26.99 on the DIV2K_LSDIR_valid and DIV2K_LSDIR_test sets, respectively." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 355, + 295, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 355, + 295, + 617 + ], + "spans": [ + { + "bbox": [ + 55, + 355, + 295, + 617 + ], + "type": "text", + "content": "Runtime. In this challenge, runtime stands as the paramount evaluation metric. ShannonLab's solution emerges as the frontrunner with the shortest runtime among all entries in the efficient SR challenge, securing its top-3 ranking position. Following closely, the TSSR and mbga claim the second and third spots, respectively. 
Remarkably, the average runtime of the top three solutions on both the validation and test sets remains below " + }, + { + "bbox": [ + 55, + 355, + 295, + 617 + ], + "type": "inline_equation", + "content": "10\\mathrm{ms}" + }, + { + "bbox": [ + 55, + 355, + 295, + 617 + ], + "type": "text", + "content": ". Impressively, the first 13 teams present solutions with an average runtime below " + }, + { + "bbox": [ + 55, + 355, + 295, + 617 + ], + "type": "inline_equation", + "content": "16\\mathrm{ms}" + }, + { + "bbox": [ + 55, + 355, + 295, + 617 + ], + "type": "text", + "content": ", showcasing a continuous enhancement in the efficiency of image SR networks. Despite the slight differences in runtime among the top three teams, the challenge retains its competitive edge. An additional distinction from previous challenges worth noting is that this year, runtime performance no longer predominantly dictates the overall rankings as it has in the past, where the top three solutions in terms of runtime were also the top performers in the main track (e.g., from NTIRE ESR 2024 [91]). This shift indicates that participants are now emphasizing a more balanced approach, focusing not only on runtime optimization but also on improving the comprehensive performance of their models" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 618, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 618, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 618, + 295, + 714 + ], + "type": "text", + "content": "Parameters. Model complexity was further evaluated by considering the number of parameters, as detailed in Table 1. In this sub-track, VEPG_C achieved the top position with only 0.044M parameters, closely followed by HannahSR and XUPTBoys with 0.060M and 0.072M parameters, respectively. The minimal disparity among the top three methods highlights their competitive edge and efficiency in managing model complexity. 
They were scored" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 553, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 120 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 120 + ], + "type": "text", + "content": "at 1.38, 1.54, and 1.68, respectively, indicating a tight competition. However, it is noteworthy that these models also exhibited relatively high runtimes, suggesting an area for potential improvement in future iterations." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 121, + 555, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 121, + 555, + 506 + ], + "spans": [ + { + "bbox": [ + 313, + 121, + 555, + 506 + ], + "type": "text", + "content": "FLOPs. The number of floating-point operations (FLOPs) is another critical metric for assessing model complexity. Within this sub-track, VEPG_C, XUPTBoys, and HannahSR secured the top three positions with FLOPs of 3.13G, 3.39G, and 3.75G, respectively. The competitiveness of this sub-track is further confirmed by the close scores of 1.45, 1.50, and 1.57, aligned with the parameter evaluation results. Remarkably, the same models top both the parameters and FLOPs evaluations, demonstrating consistent performance across different complexity metrics. Similar to the parameters sub-track, the extended runtimes of these methods point to a need for further research and optimization. Key implications include: i) Efficiency vs. Performance Trade-off: The close competition among the top models in terms of parameters and FLOPs suggests a significant trade-off between model efficiency and performance. Despite achieving minimal parameter counts and FLOPs, the high runtimes indicate that these models might be optimizing computational complexity at the expense of execution speed. 
This raises important considerations for future research in balancing efficiency with real-world usability, especially in applications where inference speed is critical. ii) Potential for Model Optimization: The consistency in ranking between the parameters and FLOPs sub-tracks reveals that models which are optimized for one aspect of computational efficiency tend to perform well in others. However, the noted high runtimes across these models suggest an untapped potential for holistic model optimization. Future work could focus on integrating more advanced optimization techniques or exploring novel architectural innovations to enhance both the computational efficiency and runtime performance." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 508, + 554, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 508, + 554, + 616 + ], + "spans": [ + { + "bbox": [ + 313, + 508, + 554, + 616 + ], + "type": "text", + "content": "Overall Evaluation. The final assessment of performance employs a comprehensive metric that synthesizes runtime, FLOPs, and the number of parameters into a unified score. In this rigorous evaluation, the EMSR Group excelled, claiming the prestigious top position, followed by XiaomiMM (the winner of the NTIRE ESR 2024 challenge) and ShannonLab in second and third places, respectively. This achievement highlights the sophisticated engineering and innovative approaches implemented by these groups." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 618, + 556, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 618, + 556, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 618, + 556, + 714 + ], + "type": "text", + "content": "Contrasting with the previous year, where runtime heavily influenced overall rankings, this year presents a shift. The best performer in runtime only secured third place in the overall competition. 
Specifically, EMSR, the overall winner, ranked fifth in runtime, sixth in parameters, and seventh in FLOPs. Similarly, XiaomiMM, which came second overall, was fourth in runtime, eleventh in parameters, and thirteenth in FLOPs. This demonstrates that: i) A balanced" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 70, + 555, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 70, + 555, + 159 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 555, + 159 + ], + "type": "text", + "content": "Table 1. Results of Ninth NTIRE 2025 Efficient SR Challenge. The performance of the solutions is compared thoroughly from three perspectives including the runtime, FLOPs, and the number of parameters. The underscript numbers associated with each metric score denote the ranking of the solution in terms of that metric. For runtime, “Val.” is the runtime averaged on DIV2K_LSDIR_valid validation set. “Test” is the runtime averaged on a test set with 200 images from DIV2K_LSDIR_test set, respectively. “Ave.” is averaged on the validation and test datasets. “#Params” is the total number of parameters of a model. “FLOPs” denotes the floating point operations. Main Track combines all three evaluation metrics. The ranking for the main track is based on the score calculated via Eq. 2, and the ranking for other sub-tracks is based on the score of each metric via Eq. 1. Please note that this is not a challenge for PSNR improvement. The “validation/testing PSNR” is not ranked. For all the scores, the lower, the better." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 56, + 168, + 555, + 507 + ], + "blocks": [ + { + "bbox": [ + 56, + 168, + 555, + 507 + ], + "lines": [ + { + "bbox": [ + 56, + 168, + 555, + 507 + ], + "spans": [ + { + "bbox": [ + 56, + 168, + 555, + 507 + ], + "type": "table", + "html": "
TeamsPSNR [dB]Runtime [ms]#Params [M]FLOPs [G]Sub-Track ScoresMain-Track
Val.TestVal.TestAve.Runtime#ParamsFLOPsOverall ScoreRanking
EMSR26.9226.9910.2689.7209.9940.1318.542.46(5)2.58(6)2.78(7)2.531
XiaomiMM26.9227.009.9589.1329.5450.1489.682.36(4)2.92(11)3.19(13)2.572
ShannonLab26.9027.008.9388.3028.6200.17211.232.18(1)3.48(17)3.84(18)2.623
TSSR26.9027.029.8128.8989.3550.16410.692.32(2)3.28(15)3.60(16)2.664
Davinci26.9227.0011.4269.87610.6510.1469.552.61(6)2.88(9)3.14(11)2.735
SRCB26.9227.0011.4129.96010.6860.1469.552.62(7)2.88(9)3.14(11)2.746
Rochester26.9427.0111.93410.45411.1940.15810.302.74(8)3.14(14)3.43(14)2.917
mbga26.9027.009.8229.2089.5150.19212.562.36(3)4.02(19)4.50(20)2.938
IESR26.9026.9913.76012.58213.1710.1438.323.28(10)2.82(7)2.71(6)3.129
ASR26.9027.0013.86411.98412.9240.1549.063.21(9)3.05(12)2.96(8)3.1510
VPEG_O26.9026.9916.35613.92615.1410.1459.423.92(12)2.86(8)3.09(9)3.6311
mmSR26.9527.0514.45012.03613.2430.21213.853.30(11)4.65(21)5.25(23)3.8012
ChanSR26.9227.0316.73815.59216.1650.21011.594.29(16)4.58(20)4.01(19)4.2913
Pixel Alchemists26.9027.0117.32214.60815.9650.21312.934.22(14)4.68(22)4.70(21)4.3614
MiSR26.9027.0217.05614.98816.0220.21313.864.24(15)4.68(22)5.26(24)4.4615
LZ26.9027.0116.98015.45016.2150.25216.424.31(17)6.21(25)7.15(25)5.0216
Z626.9026.9920.36216.18418.2730.30318.705.19(20)8.99(27)9.39(27)6.3917
TACO_SR26.9427.0517.82815.65216.7400.34220.034.52(18)11.92(30)11.01(30)6.6118
AIOT_AI26.9027.0019.83618.15818.9970.30119.565.54(21)8.86(26)10.41(28)6.7719
JNU62026.9027.0120.68818.28219.4850.32520.315.79(22)10.54(29)11.39(31)7.3420
LVGroup_HFUT26.9627.0716.39414.87615.6350.42627.874.09(13)21.91(33)28.15(34)10.3821
SVM26.9227.0430.61028.13429.3720.25113.3914.13(23)6.16(24)4.97(22)11.5622
YG26.9227.0433.65831.61432.6360.0935.8218.96(24)1.96(5)2.01(5)13.8723
NanoSR26.9727.0817.93016.30017.1150.55136.024.68(19)54.20(35)74.72(35)22.6124
MegastudyEdu Vision AI27.0127.1339.37637.52838.4520.16910.6332.03(25)3.40(16)3.57(15)23.4725
XUPTBoys26.9127.0350.56435.01242.7880.0723.3947.36(26)1.68(3)1.50(2)33.6326
MILA26.9027.0244.36242.03443.1980.0874.9349.14(27)1.88(4)1.80(4)34.9527
AiMF_SR26.9827.1046.59443.09244.8430.1809.4857.00(28)3.69(18)3.11(10)40.9228
EagleSR27.0427.1647.73045.19246.4610.35221.8965.95(29)12.82(31)13.76(32)50.1529
BVIVSR26.9726.9949.48846.79848.1430.15510.7976.75(30)3.07(13)3.64(17)54.7330
HannahSR26.9027.0258.28641.42249.8540.0603.7589.55(31)1.54(2)1.57(3)63.1531
VPEG_C26.9027.0060.04640.95050.4980.0443.1394.90(32)1.38(1)1.45(1)66.8632
CUIT_HT27.0927.2062.03859.10660.5720.30919.75235.36(33)9.39(28)10.65(29)167.7633
GXZY AI27.0127.13102.92499.102101.0130.42825.889.02e3(34)22.23(34)22.18(33)6.32e334
SCMSR26.9227.00133.866114.088123.9770.39317.627.15e4(35)17.25(32)8.25(26)5.01e435
IPCV27.2727.40366.924357.268362.0960.86665.661.51e14(36)531.32(37)2.60e3(36)1.05e1436
X-L27.0727.21525.966479.346502.6560.96670.834.81e19(37)1.10e3(38)4.83e3(37)3.36e1937
Quantum Res27.2927.40574.632558.934566.7830.79076.091.56e22(38)306.32(36)9.07e3(38)1.09e2238
The following methods are not ranked since their validation/testing PSNR (underlined) is not on par with the threshold.
SylabSR24.3624.4628.58024.82626.7030.0727.9011.111.682.588.41-
NJUPCA26.7026.8070.20252.93261.5672.30830.11257.451.83e736.822.75e6-
DepthIBN26.5626.6639.15436.87638.0150.1217.7130.802.402.5222.30-
Cidaut AI26.8626.9527.22024.97426.0970.21012.8310.524.584.658.75-
IVL26.6626.7618.74616.94417.8450.24015.645.005.696.515.33-
Baseline26.9327.0123.91220.45422.1830.27616.77.397.397.397.39-
", + "image_path": "7d49cf2eb698bccefc0d40589b7e4973c91e663cd6c23a2111a0cbded9d52ffc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 528, + 294, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 528, + 294, + 635 + ], + "spans": [ + { + "bbox": [ + 55, + 528, + 294, + 635 + ], + "type": "text", + "content": "approach to model design, optimizing across multiple metrics rather than focusing on a single aspect, is becoming crucial in competitive evaluations. ii) Achieving top performance in one metric does not guarantee similar success in overall rankings, underscoring the complexity of model optimization in real-world scenarios. This year's goal was to encourage a balanced pursuit of speed and efficiency, a challenge that has evidently led to significant innovations and advancements in model design." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 641, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 641, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 641, + 295, + 714 + ], + "type": "text", + "content": "PSNR. Team Quantum Res, IPCV, X-L, and CUIT_HTT demonstrate superior PSNR values, a critical evaluation metric in super-resolution. Specifically, Quantum Res and IPCV lead with an exceptional 27.40 dB, closely followed by X-L with 27.21 dB, and CUIT_HTT at 27.20 dB on the DIV2K_LSDIR_test set. Despite these impressive perfor" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 313, + 528, + 555, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 528, + 555, + 708 + ], + "spans": [ + { + "bbox": [ + 313, + 528, + 555, + 708 + ], + "type": "text", + "content": "mances, it is essential to emphasize that the primary focus of this challenge is on efficiency in super-resolution. 
Accordingly, we have adjusted the PSNR criteria, setting rigorous lower thresholds of 26.90 dB and 26.99 dB for the DIV2K_LSDIR_valid and DIV2K_LSDIR_test sets, respectively. This adjustment is designed to prioritize a balance between high performance and computational efficiency. A commendable total of 38 teams met this adjusted benchmark, demonstrating their capability to effectively balance image quality with efficiency. However, teams like IVL, Cidaut AI, SylabSR DepthIB, and NJUPCA, while notable for their efficiency, did not achieve the required PSNR levels. This highlights the ongoing challenge of optimizing super-resolution processes that meet both efficiency and performance standards, underscoring the complex nature of" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 165, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 165, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 165, + 83 + ], + "type": "text", + "content": "advancements in this field." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 106, + 132, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 106, + 132, + 118 + ], + "spans": [ + { + "bbox": [ + 55, + 106, + 132, + 118 + ], + "type": "text", + "content": "3.1. 
Main Ideas" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 128, + 295, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 128, + 295, + 201 + ], + "spans": [ + { + "bbox": [ + 55, + 128, + 295, + 201 + ], + "type": "text", + "content": "Throughout this challenge, several techniques have been proposed to enhance the efficiency of deep neural networks for image super-resolution (SR) while striving to maintain optimal performance. The choice of techniques largely depends on the specific metrics that a team aims to optimize. Below, we outline some typical ideas that have emerged:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 211, + 295, + 714 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 55, + 211, + 295, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 211, + 295, + 295 + ], + "spans": [ + { + "bbox": [ + 55, + 211, + 295, + 295 + ], + "type": "text", + "content": "- Distillation is an effective manner to maintain the PSNR performance without increasing computation cost during inference. The team EMSR added only the ConvLora-Like [7] operation into the base model. Similarly, team ESPAN also proposed to use the self-distillation for progressive learning strategy validated from [42]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 295, + 295, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 295, + 295, + 403 + ], + "spans": [ + { + "bbox": [ + 55, + 295, + 295, + 403 + ], + "type": "text", + "content": "- Re-parameterization [22] [24, 126] is commonly used in this challenge. 
Usually, a normal convolutional layer with multiple basic operations (" + }, + { + "bbox": [ + 55, + 295, + 295, + 403 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 295, + 295, + 403 + ], + "type": "text", + "content": " convolution, " + }, + { + "bbox": [ + 55, + 295, + 295, + 403 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 55, + 295, + 295, + 403 + ], + "type": "text", + "content": " operation, first and second-order derivative operators, skip connections) is parameterized during training. During inference, the multiple operations that reparameterize a convolution could be merged back into a single convolution. e.g., Some top teams (i.e., XiaomiMM, mmSR, HannahSR, etc) used this operation in their methods." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 403, + 295, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 403, + 295, + 475 + ], + "spans": [ + { + "bbox": [ + 55, + 403, + 295, + 475 + ], + "type": "text", + "content": "- Parameter-free attention mechanism is validated as a useful technique to enhance computational efficiency [24, 126]. Specifically, XiaomiMM proposed a swift parameter-free attention network based on parameter-free attention, which achieves the lowest runtime while maintaining a decent PSNR performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 475, + 295, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 475, + 295, + 546 + ], + "spans": [ + { + "bbox": [ + 55, + 475, + 295, + 546 + ], + "type": "text", + "content": "- Incorporating multi-scale information and hierarchical module design are proven strategies for effectively fusing critical information. For instance, solutions such as HannahSR, XuPTBoys, and ChanSR have successfully utilized multi-scale residual connections and hierarchical module designs to enhance their performance." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 546, + 295, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 546, + 295, + 605 + ], + "spans": [ + { + "bbox": [ + 55, + 546, + 295, + 605 + ], + "type": "text", + "content": "- Network pruning plays an important role. It is observed that a couple of teams (i.e., ASR, Davinci) used network pruning techniques to slightly compress a network. This leads to a more lightweight architecture without a heavy performance drop." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 606, + 295, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 606, + 295, + 666 + ], + "spans": [ + { + "bbox": [ + 55, + 606, + 295, + 666 + ], + "type": "text", + "content": "- Exploration with new network architectures is conducted. Besides the common CNN or Transformers, the state space model (i.e., vision mamba [30, 32]) was tried by GXZY.AI in this challenge, which was also validated in the last NTIRE ESR challenge [91]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 666, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 666, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 666, + 295, + 714 + ], + "type": "text", + "content": "- Various other techniques are also attempted. Some teams also proposed solutions based on neural architecture search, vision transformers, frequency processing, multi-stage design, and advanced training strategies." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 72, + 376, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 376, + 83 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 376, + 83 + ], + "type": "text", + "content": "3.2. 
Fairness" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 89, + 555, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 89, + 555, + 317 + ], + "spans": [ + { + "bbox": [ + 313, + 89, + 555, + 317 + ], + "type": "text", + "content": "To ensure the integrity and fairness of the Efficient SR Challenge, we meticulously established a set of rules focusing on the permissible datasets for training the models. Participants were allowed to augment their training with external datasets, such as Flickr2K, to promote diverse and comprehensive model training experiences. However, to guarantee an unbiased evaluation, the use of additional DIV2K and LSDIR validation sets, which include both high-resolution (HR) and low-resolution (LR) images, was explicitly prohibited during the training phase. This restriction aimed to maintain the validation set's integrity as a vital benchmark for assessing the proposed networks' performance and generalizability. Moreover, using LR images from the DIV2K and LSDIR test sets for training was strictly forbidden, ensuring the test dataset's purity and upholding the evaluation process's integrity. Lastly, the adoption of advanced data augmentation techniques during training was encouraged as a fair practice, allowing participants to enhance their models within the defined rules and guidelines." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 324, + 394, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 324, + 394, + 335 + ], + "spans": [ + { + "bbox": [ + 313, + 324, + 394, + 335 + ], + "type": "text", + "content": "3.3. 
Conclusions" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 342, + 554, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 342, + 554, + 365 + ], + "spans": [ + { + "bbox": [ + 313, + 342, + 554, + 365 + ], + "type": "text", + "content": "The analysis of the submissions to this year's Efficient SR Challenge allows us to draw several important conclusions:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 366, + 554, + 700 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 313, + 366, + 553, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 366, + 553, + 449 + ], + "spans": [ + { + "bbox": [ + 313, + 366, + 553, + 449 + ], + "type": "text", + "content": "- Firstly, the competition within the image super-resolution (SR) community remains intense. This year, the challenge attracted 244 registered participants, with 43 teams making valid submissions. All proposed methods have enhanced the state-of-the-art in efficient SR. Notably, the competition among the top three teams has intensified, with last year's winner ranking second this year." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 449, + 554, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 449, + 554, + 498 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 554, + 498 + ], + "type": "text", + "content": "- Secondly, unlike in previous challenges, dominance in runtime no longer characterizes the top-ranking teams. Instead, more balanced solutions that consider all aspects of performance are proving to be more beneficial." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 498, + 553, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 553, + 546 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 553, + 546 + ], + "type": "text", + "content": "- Thirdly, consistent with the success of deep learning techniques like DeepSeek, the distillation approach has significantly contributed to performance improvements without adding computational complexity." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 546, + 553, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 546, + 553, + 594 + ], + "spans": [ + { + "bbox": [ + 313, + 546, + 553, + 594 + ], + "type": "text", + "content": "- Fourthly, re-parameterization and network compression have emerged as crucial techniques in enhancing efficiency in SR. Ongoing exploration in these areas is encouraged to further boost efficiency." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 594, + 553, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 553, + 665 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 553, + 665 + ], + "type": "text", + "content": "- Fifthly, the use of large-scale datasets, such as the one described in [64], for pre-training has been shown to enhance accuracy significantly. Typically, training incorporates multiple phases, gradually increasing the patch size and decreasing the learning rate, optimizing the training process." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 665, + 553, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 553, + 700 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 553, + 700 + ], + "type": "text", + "content": "- Sixthly, this year's challenge saw the introduction of the state space model, presenting a novel approach that may influence future research directions in the field." 
+ } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 325, + 701, + 553, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 701, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 325, + 701, + 553, + 714 + ], + "type": "text", + "content": "Overall, by considering factors like runtime, FLOPs," + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 193 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 193 + ], + "type": "text", + "content": "and parameter count simultaneously, it is feasible to design models that optimize across multiple evaluation metrics. Finally, as computational capabilities continue to evolve, the focus on optimizing models for runtime, FLOPs, and parameter efficiency becomes increasingly vital. With advancements in both hardware and software, we expect the development of more sophisticated and efficient models in the super-resolution domain. The pursuit of efficiency in SR is likely to remain a key driver of innovation, promising exciting advancements and continual progress in the field." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 205, + 228, + 219 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 205, + 228, + 219 + ], + "spans": [ + { + "bbox": [ + 55, + 205, + 228, + 219 + ], + "type": "text", + "content": "4. 
Challenge Methods and Teams" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 226, + 110, + 237 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 226, + 110, + 237 + ], + "spans": [ + { + "bbox": [ + 55, + 226, + 110, + 237 + ], + "type": "text", + "content": "4.1. EMSR" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 243, + 296, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 243, + 296, + 352 + ], + "spans": [ + { + "bbox": [ + 55, + 243, + 296, + 352 + ], + "type": "text", + "content": "Method. The overall architecture of the team EMSR is shown in Fig. 1, which is based on the leading efficient super-resolution method SPAN [112]. Inspired by ConvLora [7], the team proposes SconvLB, which incorporates ConvLora into SPAB to improve performance without increasing computation complexity. Specifically, given a pre-trained convolutional layer in SPAB, they update it by adding Lora layers, and representing it with a low-rank decomposition:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 118, + 363, + 295, + 376 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 363, + 295, + 376 + ], + "spans": [ + { + "bbox": [ + 118, + 363, + 295, + 376 + ], + "type": "interline_equation", + "content": "W _ {\\text {C o n v L o r a}} = W _ {P T} + X Y, \\tag {3}", + "image_path": "3f67cc192541e998eec00480a1a2492eb605099f89e6c32a785e3c3a27965043.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 387, + 296, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 387, + 296, + 471 + ], + "spans": [ + { + "bbox": [ + 55, + 387, + 296, + 471 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 387, + 296, + 471 + ], + "type": "inline_equation", + "content": "W_{ConvLora}" + }, + { + "bbox": [ + 55, + 387, + 296, + 471 + ], + "type": "text", + "content": " denotes the updated weight parameters of the 
convolution, " + }, + { + "bbox": [ + 55, + 387, + 296, + 471 + ], + "type": "inline_equation", + "content": "W_{PT}" + }, + { + "bbox": [ + 55, + 387, + 296, + 471 + ], + "type": "text", + "content": " denotes the original pre-trained parameters of the convolution, " + }, + { + "bbox": [ + 55, + 387, + 296, + 471 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 55, + 387, + 296, + 471 + ], + "type": "text", + "content": " is initialized by random Gaussian distribution, and " + }, + { + "bbox": [ + 55, + 387, + 296, + 471 + ], + "type": "inline_equation", + "content": "Y" + }, + { + "bbox": [ + 55, + 387, + 296, + 471 + ], + "type": "text", + "content": " is zero in the beginning of training. Note that the Lora weights can be merged into the main backbone. Therefore, ConvLoras don't introduce extra computation during inference." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 472, + 296, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 472, + 296, + 544 + ], + "spans": [ + { + "bbox": [ + 55, + 472, + 296, + 544 + ], + "type": "text", + "content": "They adopt the pre-trained SPAN-Tiny model [112] with 26 channels. They replace the SPAB in SPAN with our proposed SconvLB, and also add ConvLora into the pixel shuffle block and the convolution before it. During training, they freeze the original weight and bias of the convolution and only update the Lora parameters." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 544, + 296, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 544, + 296, + 675 + ], + "spans": [ + { + "bbox": [ + 55, + 544, + 296, + 675 + ], + "type": "text", + "content": "Optimization. To supervise the optimization of SconvLB, they adopt a knowledge-based distillation training strategy. 
They adopt spatial affinity-based knowledge distillation [37] to transfer second-order statistical info from the teacher model to the student model by aligning spatial feature affinity matrices at multiple layers of the networks. Given a feature " + }, + { + "bbox": [ + 55, + 544, + 296, + 675 + ], + "type": "inline_equation", + "content": "F_{l} \\in R^{B \\times C \\times W \\times H}" + }, + { + "bbox": [ + 55, + 544, + 296, + 675 + ], + "type": "text", + "content": " extracted from the " + }, + { + "bbox": [ + 55, + 544, + 296, + 675 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 55, + 544, + 296, + 675 + ], + "type": "text", + "content": "-th layer of the network, they first flatten the tensor along the last two dimensions and calculate the affinity matrix " + }, + { + "bbox": [ + 55, + 544, + 296, + 675 + ], + "type": "inline_equation", + "content": "A_{\\text{spatial}}" + }, + { + "bbox": [ + 55, + 544, + 296, + 675 + ], + "type": "text", + "content": ". 
Then the spatial feature affinity-based distillation loss can be formulated as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 685, + 295, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 685, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 111, + 685, + 295, + 715 + ], + "type": "interline_equation", + "content": "L _ {A D} = \\frac {1}{| A |} \\sum_ {l = 1} ^ {n} \\left\\| A _ {l} ^ {S} - A _ {l} ^ {T} \\right\\| _ {1}, \\tag {4}", + "image_path": "8cbbb99fffc40ec7d3df107176ab0e1d25d99c8a663aeb0e008ad9ce578e6cc3.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 72, + 553, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 131 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 131 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 72, + 553, + 131 + ], + "type": "inline_equation", + "content": "A_{l}^{S}" + }, + { + "bbox": [ + 313, + 72, + 553, + 131 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 72, + 553, + 131 + ], + "type": "inline_equation", + "content": "A_{l}^{T}" + }, + { + "bbox": [ + 313, + 72, + 553, + 131 + ], + "type": "text", + "content": " are the spatial affinity matrix of student and teacher networks extracted from the feature maps of the " + }, + { + "bbox": [ + 313, + 72, + 553, + 131 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 313, + 72, + 553, + 131 + ], + "type": "text", + "content": "-th layer, respectively. " + }, + { + "bbox": [ + 313, + 72, + 553, + 131 + ], + "type": "inline_equation", + "content": "|A|" + }, + { + "bbox": [ + 313, + 72, + 553, + 131 + ], + "type": "text", + "content": " denotes the number of elements in the affinity matrix. Specifically, the team applies the distillation loss after each SconvLB." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 133, + 553, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 133, + 553, + 157 + ], + "spans": [ + { + "bbox": [ + 313, + 133, + 553, + 157 + ], + "type": "text", + "content": "Except for the distillation loss in the feature space, the team applies a pixel-level distillation loss:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 369, + 166, + 553, + 180 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 166, + 553, + 180 + ], + "spans": [ + { + "bbox": [ + 369, + 166, + 553, + 180 + ], + "type": "interline_equation", + "content": "L _ {T S} = \\left\\| \\mathcal {T} \\left(I _ {L R}\\right) - \\mathcal {S} \\left(I _ {L R}\\right) \\right\\| _ {1}, \\tag {5}", + "image_path": "5ccecde1d3ef823534fdf75e86042b78c4e74ce2fc7138ff98d2bf2829190011.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 190, + 553, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 190, + 553, + 213 + ], + "spans": [ + { + "bbox": [ + 313, + 190, + 553, + 213 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 190, + 553, + 213 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 313, + 190, + 553, + 213 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 190, + 553, + 213 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 313, + 190, + 553, + 213 + ], + "type": "text", + "content": " denote the teacher network and the student network, respectively. " + }, + { + "bbox": [ + 313, + 190, + 553, + 213 + ], + "type": "inline_equation", + "content": "I_{LR}" + }, + { + "bbox": [ + 313, + 190, + 553, + 213 + ], + "type": "text", + "content": " denotes the LR image." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 326, + 213, + 443, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 213, + 443, + 225 + ], + "spans": [ + { + "bbox": [ + 326, + 213, + 443, + 225 + ], + "type": "text", + "content": "They also apply the " + }, + { + "bbox": [ + 326, + 213, + 443, + 225 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 326, + 213, + 443, + 225 + ], + "type": "text", + "content": " loss:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 377, + 235, + 553, + 249 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 377, + 235, + 553, + 249 + ], + "spans": [ + { + "bbox": [ + 377, + 235, + 553, + 249 + ], + "type": "interline_equation", + "content": "L _ {r e c} = \\left\\| I _ {H R} - \\mathcal {S} \\left(I _ {L R}\\right) \\right\\| _ {2} ^ {2}, \\tag {6}", + "image_path": "9d57b96f81dfb69c25ced351b0ea2d15b1355523d983f5145ab00fb53b103f3a.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 258, + 553, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 258, + 553, + 281 + ], + "spans": [ + { + "bbox": [ + 313, + 258, + 553, + 281 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 258, + 553, + 281 + ], + "type": "inline_equation", + "content": "I_{HR}" + }, + { + "bbox": [ + 313, + 258, + 553, + 281 + ], + "type": "text", + "content": " denotes the ground truth high-resolution image. The overall loss is:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 356, + 293, + 553, + 306 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 356, + 293, + 553, + 306 + ], + "spans": [ + { + "bbox": [ + 356, + 293, + 553, + 306 + ], + "type": "interline_equation", + "content": "L _ {t o t a l} = \\lambda_ {1} L _ {r e c} + \\lambda_ {2} L _ {T S} + \\lambda_ {3} L _ {A D}. 
\\tag {7}", + "image_path": "4ec5aed984692f2599a018cf1a8613857c620577add3863933589b6b58f6373d.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 316, + 553, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 316, + 553, + 363 + ], + "spans": [ + { + "bbox": [ + 313, + 316, + 553, + 363 + ], + "type": "text", + "content": "Training Details. The team uses DIV2K and LSDIR for training. Random flipping and random rotation are used for data augmentation. The training process is divided into two stages." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 364, + 553, + 460 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 315, + 364, + 553, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 364, + 553, + 423 + ], + "spans": [ + { + "bbox": [ + 315, + 364, + 553, + 423 + ], + "type": "text", + "content": "1. Stage One: HR patches of size " + }, + { + "bbox": [ + 315, + 364, + 553, + 423 + ], + "type": "inline_equation", + "content": "192 \\times 192" + }, + { + "bbox": [ + 315, + 364, + 553, + 423 + ], + "type": "text", + "content": " are randomly cropped from HR images, and the mini-batch size is set to 8. The model is trained by minimizing the " + }, + { + "bbox": [ + 315, + 364, + 553, + 423 + ], + "type": "inline_equation", + "content": "L_{total}" + }, + { + "bbox": [ + 315, + 364, + 553, + 423 + ], + "type": "text", + "content": " mentioned above with the Adam optimizer. The learning rate is " + }, + { + "bbox": [ + 315, + 364, + 553, + 423 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 315, + 364, + 553, + 423 + ], + "type": "text", + "content": ". A total of " + }, + { + "bbox": [ + 315, + 364, + 553, + 423 + ], + "type": "inline_equation", + "content": "30k" + }, + { + "bbox": [ + 315, + 364, + 553, + 423 + ], + "type": "text", + "content": " iterations are trained." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 424, + 553, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 424, + 553, + 460 + ], + "spans": [ + { + "bbox": [ + 314, + 424, + 553, + 460 + ], + "type": "text", + "content": "2. Stage Two: In the second stage, the team increases the size of the HR image patches to " + }, + { + "bbox": [ + 314, + 424, + 553, + 460 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 314, + 424, + 553, + 460 + ], + "type": "text", + "content": ", with other settings remaining the same as in the first stage." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 460, + 553, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 460, + 553, + 496 + ], + "spans": [ + { + "bbox": [ + 313, + 460, + 553, + 496 + ], + "type": "text", + "content": "Throughout the entire training process, they employ an Exponential Moving Average (EMA) strategy to enhance the robustness of training." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 504, + 391, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 504, + 391, + 516 + ], + "spans": [ + { + "bbox": [ + 314, + 504, + 391, + 516 + ], + "type": "text", + "content": "4.2. XiaomiMM" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 521, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 521, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 521, + 555, + 713 + ], + "type": "text", + "content": "Method Details. The team proposes an accelerated variant of the Swift Parameter-free Attention Network (SPAN) [112], called SPANF, which is built upon the fundamental SPAB block. To enhance the inference speed, SPANF introduces several key modifications compared to the original SPAN model. 
Firstly, they remove the last SPAB block, which reduces computational complexity without significantly impacting performance. Secondly, they increase the number of channels to 32, providing a better balance between model capacity and speed. Thirdly, they replace the first convolution layer with a nearest neighbor upsampling operation, which is computationally less intensive and accelerates the upsampling process. Lastly, they implement simple modifications to the shortcut connections within the network to further streamline computations. These changes collectively enable SPANF to achieve faster" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 74, + 553, + 231 + ], + "blocks": [ + { + "bbox": [ + 58, + 74, + 553, + 231 + ], + "lines": [ + { + "bbox": [ + 58, + 74, + 553, + 231 + ], + "spans": [ + { + "bbox": [ + 58, + 74, + 553, + 231 + ], + "type": "image", + "image_path": "8ac9e00d1996213e6f79f7b908791efe8ef055eead3451ed8eaed1fab8097e08.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 58, + 244, + 550, + 257 + ], + "lines": [ + { + "bbox": [ + 58, + 244, + 550, + 257 + ], + "spans": [ + { + "bbox": [ + 58, + 244, + 550, + 257 + ], + "type": "text", + "content": "Figure 1. Team EMSR: The team incorporates ConvLoras into the network to increase the performance without adding extra complexity." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 59, + 271, + 291, + 398 + ], + "blocks": [ + { + "bbox": [ + 59, + 271, + 291, + 398 + ], + "lines": [ + { + "bbox": [ + 59, + 271, + 291, + 398 + ], + "spans": [ + { + "bbox": [ + 59, + 271, + 291, + 398 + ], + "type": "image", + "image_path": "aad7982650cb376bfab88d1f41e6514c27a63d4ddf9e751f866833b7ce6411d9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 408, + 295, + 441 + ], + "lines": [ + { + "bbox": [ + 55, + 408, + 295, + 441 + ], + "spans": [ + { + "bbox": [ + 55, + 408, + 295, + 441 + ], + "type": "text", + "content": "Figure 2. The proposed SPANF architecture. The main structure is basically the same as SPAN [112], but one SPAB module is reduced, and the number of channels is 32." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 471, + 295, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 471, + 295, + 553 + ], + "spans": [ + { + "bbox": [ + 55, + 471, + 295, + 553 + ], + "type": "text", + "content": "inference speeds while maintaining competitive image quality. The evaluations on multiple benchmarks demonstrate that SPANF not only upholds the efficiency of SPAN's parameter-free attention mechanism but also offers superior speed, making it highly suitable for real-world applications, particularly in scenarios with limited computational resources." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "text", + "content": "Implementation Details. The dataset utilized for training comprises of DIV2K and LSDIR. 
During each training batch, 64 HR RGB patches are cropped, measuring " + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "text", + "content": ", and subjected to random flipping and rotation. The learning rate is initialized at " + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "text", + "content": " and undergoes a halving process every " + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "inline_equation", + "content": "2 \\times 10^{5}" + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "text", + "content": " iterations. The network undergoes training for a total of " + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "inline_equation", + "content": "10^{6}" + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "text", + "content": " iterations, with the L1 loss function being minimized through the utilization of the Adam optimizer [54]. They repeated the aforementioned training settings four times after loading the trained weights. Subsequently, fine-tuning is executed using the L1 and L2 loss functions, with an initial learning rate of " + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "inline_equation", + "content": "5 \\times 10^{5}" + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "text", + "content": " iterations, and HR patch size of 512. 
They con" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 268, + 555, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 268, + 555, + 315 + ], + "spans": [ + { + "bbox": [ + 313, + 268, + 555, + 315 + ], + "type": "text", + "content": "duced finetuning on four models utilizing both L1 and L2 losses, and employed batch sizes of 64 and 128. Finally, they integrated these models' parameters to obtain our ultimate model." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 323, + 397, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 323, + 397, + 335 + ], + "spans": [ + { + "bbox": [ + 313, + 323, + 397, + 335 + ], + "type": "text", + "content": "4.3. ShannonLab" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 341, + 555, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 341, + 555, + 509 + ], + "spans": [ + { + "bbox": [ + 313, + 341, + 555, + 509 + ], + "type": "text", + "content": "Method. The method proposed by the team draws inspiration from ECBSR and SPAN. First, they optimized the ECB module by introducing a 1x1 convolutional layer for channel expansion before the input tensor enters the ECB module. After processing, another 1x1 convolution restores the original channel dimensions, while incorporating residual connections. During inference, these components can be merged into a standard 3x3 convolution through reparameterization, thereby enhancing the ECB module's effectiveness without increasing computational overhead. As illustrated in Fig.3, The complete model architecture of TSR comprises a shallow feature extraction convolution, a reconstruction convolution, a PixelShuffle module, and four REECB block which made of stacked optimized ECB." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 509, + 554, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 509, + 554, + 568 + ], + "spans": [ + { + "bbox": [ + 313, + 509, + 554, + 568 + ], + "type": "text", + "content": "Training Details. The model is trained on the DIV2K and LSDIR train dataset with random flipping and rotation applied for data augmentation. The Adam optimizer is consistently employed throughout the training process. The entire training process is divided into five steps." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 570, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 313, + 570, + 553, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 570, + 553, + 628 + ], + "spans": [ + { + "bbox": [ + 313, + 570, + 553, + 628 + ], + "type": "text", + "content": "1. HR patches of size " + }, + { + "bbox": [ + 313, + 570, + 553, + 628 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 570, + 553, + 628 + ], + "type": "text", + "content": " are randomly cropped from HR images, and the mini-batch size is set to 32. L1 loss is used and the initial learning rate is set to 5e-4, with a cosine learning rate decay strategy. The total iterations is 500k." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 629, + 553, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 553, + 688 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 553, + 688 + ], + "type": "text", + "content": "2. HR patches of size " + }, + { + "bbox": [ + 313, + 629, + 553, + 688 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 629, + 553, + 688 + ], + "type": "text", + "content": " are randomly cropped from HR images, and the mini-batch size is set to 32. 
L1 and L2 loss is used and the initial learning rate is set to 5e-4, with a cosine learning rate decay strategy. The total iterations is 1000k." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 689, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 553, + 713 + ], + "type": "text", + "content": "3. HR patches of size " + }, + { + "bbox": [ + 313, + 689, + 553, + 713 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 313, + 689, + 553, + 713 + ], + "type": "text", + "content": " are randomly cropped from HR images, and the mini-batch size is set to 64. L2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 74, + 293, + 110 + ], + "blocks": [ + { + "bbox": [ + 60, + 74, + 293, + 110 + ], + "lines": [ + { + "bbox": [ + 60, + 74, + 293, + 110 + ], + "spans": [ + { + "bbox": [ + 60, + 74, + 293, + 110 + ], + "type": "image", + "image_path": "d8575e44d65aceb2fd23b5bd961f7c359c9fc2f3717d1291c9ae893c01490dc3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 82, + 121, + 268, + 133 + ], + "lines": [ + { + "bbox": [ + 82, + 121, + 268, + 133 + ], + "spans": [ + { + "bbox": [ + 82, + 121, + 268, + 133 + ], + "type": "text", + "content": "Figure 3. Team ShannonLab: The pipeline of TSR." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 152, + 295, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 152, + 295, + 186 + ], + "spans": [ + { + "bbox": [ + 55, + 152, + 295, + 186 + ], + "type": "text", + "content": "loss is used and the initial learning rate is set to 2e-4, with a cosine learning rate decay strategy. The total iterations is " + }, + { + "bbox": [ + 55, + 152, + 295, + 186 + ], + "type": "inline_equation", + "content": "1000\\mathrm{k}" + }, + { + "bbox": [ + 55, + 152, + 295, + 186 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 187, + 295, + 305 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 55, + 187, + 295, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 187, + 295, + 246 + ], + "spans": [ + { + "bbox": [ + 55, + 187, + 295, + 246 + ], + "type": "text", + "content": "4. HR patches of size " + }, + { + "bbox": [ + 55, + 187, + 295, + 246 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 55, + 187, + 295, + 246 + ], + "type": "text", + "content": " are randomly cropped from HR images, and the mini-batch size is set to 64. L2 loss is used and the initial learning rate is set to 1e-4, with a cosine learning rate decay strategy. The total iterations is 1000k." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 247, + 295, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 247, + 295, + 305 + ], + "spans": [ + { + "bbox": [ + 55, + 247, + 295, + 305 + ], + "type": "text", + "content": "5. 
HR patches of size " + }, + { + "bbox": [ + 55, + 247, + 295, + 305 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 55, + 247, + 295, + 305 + ], + "type": "text", + "content": " are randomly cropped from HR images, and the mini-batch size is set to 64. L2 loss is used and the initial learning rate is set to 1e-5, with a cosine learning rate decay strategy. The total iterations is 1000k." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 312, + 105, + 324 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 312, + 105, + 324 + ], + "spans": [ + { + "bbox": [ + 55, + 312, + 105, + 324 + ], + "type": "text", + "content": "4.4. TSSR" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 331, + 295, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 331, + 295, + 378 + ], + "spans": [ + { + "bbox": [ + 55, + 331, + 295, + 378 + ], + "type": "text", + "content": "Method. They combined the ideas of reparameterization and attention mechanism to design a model that can capture image information in the network and effectively achieve image super-resolution." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 379, + 295, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 379, + 295, + 403 + ], + "spans": [ + { + "bbox": [ + 55, + 379, + 295, + 403 + ], + "type": "text", + "content": "Training Details. The training process is divided into three steps." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 403, + 295, + 582 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 55, + 403, + 295, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 403, + 295, + 461 + ], + "spans": [ + { + "bbox": [ + 55, + 403, + 295, + 461 + ], + "type": "text", + "content": "1. 
HR patches of size " + }, + { + "bbox": [ + 55, + 403, + 295, + 461 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 403, + 295, + 461 + ], + "type": "text", + "content": " are randomly cropped from HR images, and the mini-batch size is set to 64. L1 loss with AdamW optimizer is used and the initial learning rate is set to 0.0005 and halved at every 100k iterations. The total iterations is 500k." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 462, + 295, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 462, + 295, + 521 + ], + "spans": [ + { + "bbox": [ + 55, + 462, + 295, + 521 + ], + "type": "text", + "content": "2. HR patches of size " + }, + { + "bbox": [ + 55, + 462, + 295, + 521 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 462, + 295, + 521 + ], + "type": "text", + "content": " are randomly cropped from HR images, and the mini-batch size is set to 64. L1 and L2 loss with AdamW optimizer is used and the initial learning rate is set to 0.0002 and halved at every 100k iterations. The total iterations is 1000k." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 522, + 295, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 522, + 295, + 582 + ], + "spans": [ + { + "bbox": [ + 55, + 522, + 295, + 582 + ], + "type": "text", + "content": "3. HR patches of size " + }, + { + "bbox": [ + 55, + 522, + 295, + 582 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 55, + 522, + 295, + 582 + ], + "type": "text", + "content": " are randomly cropped from HR images, and the mini-batch size is set to 64. L2 loss with AdamW optimizer is used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 1000k." 
+ } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 588, + 105, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 588, + 105, + 601 + ], + "spans": [ + { + "bbox": [ + 55, + 588, + 105, + 601 + ], + "type": "text", + "content": "4.5. mbga" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "content": "Architecture. The team proposes the ESPAN, which is based on SPAN [111]. Through evaluations of depth-channel combinations in SPAN on an A6000 GPU, they determined that setting the number of channels to 32 yields higher efficiency than 28 channels. To reduce parameters and FLOPs, a depth of 6 was adopted. Additionally, a " + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "inline_equation", + "content": "9 \\times 9" + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "content": " convolution replaced the conventional " + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "content": " convolution at the network's input stage since they find that " + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "inline_equation", + "content": "9 \\times 9" + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "content": " convolution is faster than " + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 605, + 295, + 713 + ], + "type": "text", + "content": " convolution on A6000." 
+ } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 367, + 78, + 504, + 222 + ], + "blocks": [ + { + "bbox": [ + 367, + 78, + 504, + 222 + ], + "lines": [ + { + "bbox": [ + 367, + 78, + 504, + 222 + ], + "spans": [ + { + "bbox": [ + 367, + 78, + 504, + 222 + ], + "type": "image", + "image_path": "8625a75f76c2da91bb4d2e4dae9cd14a3e706d54d637ec9410ab4e46f76d0fe9.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 339, + 239, + 528, + 250 + ], + "lines": [ + { + "bbox": [ + 339, + 239, + 528, + 250 + ], + "spans": [ + { + "bbox": [ + 339, + 239, + 528, + 250 + ], + "type": "text", + "content": "Figure 4. Team mbga: General Reparameterization." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 316, + 264, + 555, + 345 + ], + "blocks": [ + { + "bbox": [ + 316, + 264, + 555, + 345 + ], + "lines": [ + { + "bbox": [ + 316, + 264, + 555, + 345 + ], + "spans": [ + { + "bbox": [ + 316, + 264, + 555, + 345 + ], + "type": "image", + "image_path": "ffaeb29db7fde13f35fcbf1cb39fb49d6fad1f29499bad6f15b795904e69fbdc.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 339, + 352, + 528, + 364 + ], + "lines": [ + { + "bbox": [ + 339, + 352, + 528, + 364 + ], + "spans": [ + { + "bbox": [ + 339, + 352, + 528, + 364 + ], + "type": "text", + "content": "Figure 5. Team mbga: ESPAN with self distillation." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 388, + 555, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 388, + 555, + 508 + ], + "spans": [ + { + "bbox": [ + 313, + 388, + 555, + 508 + ], + "type": "text", + "content": "General Reparameterization. Inspired by MobileOne [107] and RepVGG [23], the team proposes a generalized reparameterization block (Fig. 4). 
The block consists of four " + }, + { + "bbox": [ + 313, + 388, + 555, + 508 + ], + "type": "inline_equation", + "content": "1 \\times 1 - 3 \\times 3" + }, + { + "bbox": [ + 313, + 388, + 555, + 508 + ], + "type": "text", + "content": " convolution branches, one " + }, + { + "bbox": [ + 313, + 388, + 555, + 508 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 313, + 388, + 555, + 508 + ], + "type": "text", + "content": " convolution branch, and one " + }, + { + "bbox": [ + 313, + 388, + 555, + 508 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 313, + 388, + 555, + 508 + ], + "type": "text", + "content": " convolution branch. Skip connections are omitted due to empirical observations of training instability. While additional duplicated branches or " + }, + { + "bbox": [ + 313, + 388, + 555, + 508 + ], + "type": "inline_equation", + "content": "3 \\times 3 - 1 \\times 1" + }, + { + "bbox": [ + 313, + 388, + 555, + 508 + ], + "type": "text", + "content": " convolution branches are feasible, the current configuration is found to offer superior performance consistency during optimization." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 509, + 556, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 509, + 556, + 687 + ], + "spans": [ + { + "bbox": [ + 313, + 509, + 556, + 687 + ], + "type": "text", + "content": "Self distillation and progressive learning. Inspired by RIFE [42], self-distillation is incorporated into their training pipeline. The teacher model shares the identical backbone as the student model but includes three extra SPAB blocks appended to the student's backbone (Fig. 5). A self-distillation loss similar to RIFE's formulation is adopted to co-train the teacher and student networks. This design enables the teacher model to learn robust backbone features. 
After the distillation phase, the student loss and distillation loss components are removed, and the entire teacher model is fine-tuned. Leveraging the pre-trained robust teacher, progressive learning is employed: the extra SPAB blocks are gradually removed from the teacher's backbone, finally resulting in an architecture identical to the original student model." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "content": "Frequency-Aware Loss. Since small models have limited parameters, during training, they should make the model fo" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 179 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 179 + ], + "type": "text", + "content": "cus more on important (or difficult) areas. In their methods, two types of frequency-aware losses are employed. The first type is the DCT loss. They use the discrete cosine transform (DCT) to convert the RGB domain to the frequency domain and then apply the L1 loss to calculate the difference. The other type is the edge loss. They add a blur to the image and then subtract the blurred image from the original one to obtain the high frequency area. Subsequently, the L1 loss is calculated on this high frequency area." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 180, + 294, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 180, + 294, + 216 + ], + "spans": [ + { + "bbox": [ + 55, + 180, + 294, + 216 + ], + "type": "text", + "content": "Training details: The training process contains two stages. And the training dataset is the DIV2K_LSDIR_train. General reparameterization is used on the whole process." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 216, + 294, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 216, + 294, + 239 + ], + "spans": [ + { + "bbox": [ + 55, + 216, + 294, + 239 + ], + "type": "text", + "content": "I. At the first stage, they use self distillation to train the teacher model." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 242, + 295, + 529 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 55, + 242, + 295, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 242, + 295, + 361 + ], + "spans": [ + { + "bbox": [ + 55, + 242, + 295, + 361 + ], + "type": "text", + "content": "- Step1. The team first trains a 2x super-resolution model. HR patches of size 256x256 are randomly cropped from HR images, and the mini-batch size is set to 64. L1 loss and self distillation loss with AdamW optimizer are used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 500k. This step is repeated twice. And then they follow the same training setting and use 2x super-resolution model as pretrained model to train a 4x super-resolution model. This step is repeated twice." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 361, + 295, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 361, + 295, + 444 + ], + "spans": [ + { + "bbox": [ + 55, + 361, + 295, + 444 + ], + "type": "text", + "content": "- Step2. 
HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. MSE loss, frequency-aware loss and self distillation loss with AdamW optimizer are used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 500k. This step is also repeated twice." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 445, + 295, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 445, + 295, + 529 + ], + "spans": [ + { + "bbox": [ + 55, + 445, + 295, + 529 + ], + "type": "text", + "content": "- Step3. They only train the teacher model. HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. MSE loss and frequency-aware loss with AdamW optimizer are used and the initial learning rate is set to 0.00005 and halved at every 100k iterations. The total iterations is 500k. This step is also repeated twice." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 532, + 295, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 532, + 295, + 555 + ], + "spans": [ + { + "bbox": [ + 55, + 532, + 295, + 555 + ], + "type": "text", + "content": "II. At the second stage, they use progressive learning to get the final student model." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 558, + 295, + 712 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 55, + 558, + 295, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 558, + 295, + 628 + ], + "spans": [ + { + "bbox": [ + 55, + 558, + 295, + 628 + ], + "type": "text", + "content": "- Step4. They drop the additional SPAB block one by one. HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. 
L1 loss with AdamW optimizer are used and the initial learning rate is set to 0.0001 and halved at every 100k iterations. The total iterations is 500k." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 630, + 295, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 630, + 295, + 712 + ], + "spans": [ + { + "bbox": [ + 55, + 630, + 295, + 712 + ], + "type": "text", + "content": "- Step5. They repeat the following training process many times until convergence. HR patches of size 512x512 are randomly cropped from HR images, and the mini-batch size is set to 16. MSE loss and frequency-aware loss with AdamW optimizer are used and the initial learning rate is set to 0.00005 and halved at every 100k iterations. The total iterations is 500k." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 72, + 380, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 380, + 83 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 380, + 83 + ], + "type": "text", + "content": "4.6. VPEG_C" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 89, + 553, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 89, + 553, + 160 + ], + "spans": [ + { + "bbox": [ + 313, + 89, + 553, + 160 + ], + "type": "text", + "content": "General Method Description. As illustrated in Fig. 6, they propose a Dual Attention Network (DAN) for the lightweight single-image super-resolution task. The core components of DAN consist of three parts: a Local Residual Block (LRB), a Spatial Attention Block (SAB), and a Channel Attention Block (CAB)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 161, + 553, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 161, + 553, + 244 + ], + "spans": [ + { + "bbox": [ + 313, + 161, + 553, + 244 + ], + "type": "text", + "content": "Local Residual Block (LRB). 
They leverage the " + }, + { + "bbox": [ + 313, + 161, + 553, + 244 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 313, + 161, + 553, + 244 + ], + "type": "text", + "content": " convolution layers followed by a " + }, + { + "bbox": [ + 313, + 161, + 553, + 244 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 313, + 161, + 553, + 244 + ], + "type": "text", + "content": " depthwise convolution as the basic unit, repeated three times. Specially, GELU activation is applied on each layers, and the features are passed in a densely connected manner. At the end of the block, feature maps from different levels are aggregated using channel concatenation, effectively capturing local image details." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 245, + 553, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 245, + 553, + 304 + ], + "spans": [ + { + "bbox": [ + 313, + 245, + 553, + 304 + ], + "type": "text", + "content": "Spatial Attention Block (SAB). They adopt the spatial attention design of SMFANet [144], which employs a variance-constrained feature modulation mechanism to aggregate spatial feature. This allows efficient spatial interaction with minimal computational cost." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 304, + 553, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 304, + 553, + 375 + ], + "spans": [ + { + "bbox": [ + 313, + 304, + 553, + 375 + ], + "type": "text", + "content": "Channel Attention Block (CAB). Global channel-wise information is modeled through a self-gating mechanism that enhances local representations and increases model non-linearity. This is followed by a key-value shared MDTA [132] for global interaction and a GDFN [132] for feature refinement." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 376, + 553, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 376, + 553, + 412 + ], + "spans": [ + { + "bbox": [ + 313, + 376, + 553, + 412 + ], + "type": "text", + "content": "Training Description. The proposed DAN consists of 6 feature mixing modules with 16 channels. The training process is divided into two stages:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 310, + 414, + 553, + 604 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 311, + 414, + 553, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 414, + 553, + 509 + ], + "spans": [ + { + "bbox": [ + 311, + 414, + 553, + 509 + ], + "type": "text", + "content": "1. Pre-training Stage: They pre-train DAN using 800 images from the DIV2K [100] and the first 10K images of the LSDIR [64] datasets. The cropped LR image size is " + }, + { + "bbox": [ + 311, + 414, + 553, + 509 + ], + "type": "inline_equation", + "content": "72 \\times 72" + }, + { + "bbox": [ + 311, + 414, + 553, + 509 + ], + "type": "text", + "content": ", and the mini-batch size is set to 64. The DAN is trained by minimizing L1 loss and the frequency loss[14] with Adam optimizer for total 800, 000 iterations. The initial learning rate is set to 2e-3 and halved at 200K, 400K, 600K, 700K." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 510, + 553, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 510, + 553, + 604 + ], + "spans": [ + { + "bbox": [ + 310, + 510, + 553, + 604 + ], + "type": "text", + "content": "2. Fine-tuning Stage: They fine-tune the model on the 800 images of DIV2K [100] and the first 10K images of the LSDIR [64] datasets. 
The cropped LR image size is " + }, + { + "bbox": [ + 310, + 510, + 553, + 604 + ], + "type": "inline_equation", + "content": "72 \\times 72" + }, + { + "bbox": [ + 310, + 510, + 553, + 604 + ], + "type": "text", + "content": ", and the mini-batch size is set to 64. The DAN is trained by minimizing PSNR loss with the Adam optimizer for total 200, 000 iterations. They set the initial learning rate to 5e-4 and halve it at 50K, 100K, 150K, and 175 K." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 612, + 389, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 612, + 389, + 624 + ], + "spans": [ + { + "bbox": [ + 314, + 612, + 389, + 624 + ], + "type": "text", + "content": "4.7. XUPTBoys" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 629, + 553, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 553, + 677 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 553, + 677 + ], + "type": "text", + "content": "General Method Description. The XUPTBoys team proposed the Frequency-Guided Multilevel Dispersion Network (FMDN), as shown in Fig. 7.FMDN adopts a similar basic framework to [45, 67, 71, 81]." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 677, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 553, + 713 + ], + "type": "text", + "content": "Based on the above analysis, they propose the new Frequency-Guided Multi-level Dispersion Block(FMDB) and the new Frequency-Guided Multi-level Dispersion" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 95, + 84, + 504, + 319 + ], + "blocks": [ + { + "bbox": [ + 95, + 84, + 504, + 319 + ], + "lines": [ + { + "bbox": [ + 95, + 84, + 504, + 319 + ], + "spans": [ + { + "bbox": [ + 95, + 84, + 504, + 319 + ], + "type": "image", + "image_path": "36311b6faf268e669dcd643228db99b83d29abb2a47e421a048368d4aa625818.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 209, + 331, + 400, + 342 + ], + "lines": [ + { + "bbox": [ + 209, + 331, + 400, + 342 + ], + "spans": [ + { + "bbox": [ + 209, + 331, + 400, + 342 + ], + "type": "text", + "content": "Figure 6. Team VPEG_C: An overview of the DAN." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 79, + 355, + 500, + 451 + ], + "blocks": [ + { + "bbox": [ + 79, + 355, + 500, + 451 + ], + "lines": [ + { + "bbox": [ + 79, + 355, + 500, + 451 + ], + "spans": [ + { + "bbox": [ + 79, + 355, + 500, + 451 + ], + "type": "image", + "image_path": "bd994d0588e6812fed53823ee522107759f02c0a3d97cd1a32939c53e159da5f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 96, + 459, + 513, + 472 + ], + "lines": [ + { + "bbox": [ + 96, + 459, + 513, + 472 + ], + "spans": [ + { + "bbox": [ + 96, + 459, + 513, + 472 + ], + "type": "text", + "content": "Figure 7. Team XUPTBoys: The whole framework of Frequency-Guided Multi-level Dispersion Network (FMDN)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 491, + 296, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 491, + 296, + 624 + ], + "spans": [ + { + "bbox": [ + 55, + 491, + 296, + 624 + ], + "type": "text", + "content": "Block Basic(FMDB-B) as the base block of FMDN. As shown in Fig. 8 they use Hierarchical Variance-guided Spatial Attention(HVSA), Reallocated Contrast-Aware Channel Attention (RCCA) as alternatives to Enhanced Spatial Attention (ESA) [73] and Contrast-Aware Channel Attention (CCA) [44], Frequency-Guided Residual block (FRB), Asymmetric FeedForward Network (AFFN), Multilevel Residual Convolution (MRConv) and Multilevel Residual Convolution Basic(MRConv-B). The difference between FMDB and FMDB-B is that the former uses MRConv, while the latter uses MRConv-B." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 629, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 629, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 629, + 296, + 715 + ], + "type": "text", + "content": "In HVSA, the effects of multilevel branching and local variance on performance are examined. Small-window multilevel branches fail to capture sufficient information, while local variance within a single branch can create significant weight disparities. To address these issues, [81] was enhanced to introduce the D5 and D7 branches, which effectively utilize local variance to capture information" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 491, + 555, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 491, + 555, + 708 + ], + "spans": [ + { + "bbox": [ + 313, + 491, + 555, + 708 + ], + "type": "text", + "content": "rich regions while balancing performance and complexity. In RCCA, this approach improves the traditional channel attention mechanism by not only reallocating weights across channels but also better managing shared information among them. Introduces complementary branches with " + }, + { + "bbox": [ + 313, + 491, + 555, + 708 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 313, + 491, + 555, + 708 + ], + "type": "text", + "content": " convolutions and GELU activation functions, which help redistribute complementary information, improving the uniqueness of each channel. In FRB, it enhances feature representation using convolutional layers and GELU activation. It normalizes input, extracts features with depthwise convolutions of different kernel sizes, and combines them through residual connections to preserve spatial information for effective image processing. 
In AFFN, it applies layer normalization and a " + }, + { + "bbox": [ + 313, + 491, + 555, + 708 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 313, + 491, + 555, + 708 + ], + "type": "text", + "content": " convolution to expand feature dimensions. It then uses two depthwise convolutions with different kernel sizes, combines the results with GELU activation, and projects the output back to the original dimension with a residual connection. In MRConv and" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 71, + 340, + 137 + ], + "blocks": [ + { + "bbox": [ + 63, + 71, + 340, + 137 + ], + "lines": [ + { + "bbox": [ + 63, + 71, + 340, + 137 + ], + "spans": [ + { + "bbox": [ + 63, + 71, + 340, + 137 + ], + "type": "image", + "image_path": "44d502a6c0ed805d464c3c6f19148e6c1e459ecc1aa0d51d667b715fea387a24.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 345, + 72, + 515, + 139 + ], + "blocks": [ + { + "bbox": [ + 345, + 72, + 515, + 139 + ], + "lines": [ + { + "bbox": [ + 345, + 72, + 515, + 139 + ], + "spans": [ + { + "bbox": [ + 345, + 72, + 515, + 139 + ], + "type": "image", + "image_path": "6ea31666372ce72972d1e01c9f83342103a7a680943cd9cd751793bd9f5c5350.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 406, + 141, + 447, + 149 + ], + "lines": [ + { + "bbox": [ + 406, + 141, + 447, + 149 + ], + "spans": [ + { + "bbox": [ + 406, + 141, + 447, + 149 + ], + "type": "text", + "content": "(f) 
MRCov-B" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 77, + 140, + 226, + 351 + ], + "blocks": [ + { + "bbox": [ + 77, + 140, + 226, + 351 + ], + "lines": [ + { + "bbox": [ + 77, + 140, + 226, + 351 + ], + "spans": [ + { + "bbox": [ + 77, + 140, + 226, + 351 + ], + "type": "image", + "image_path": "c2192d342d64556a1a2860e014da3529e5c2dc30bbd7246a3a3f93bc4b03ebb7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 145, + 353, + 173, + 361 + ], + "lines": [ + { + "bbox": [ + 145, + 353, + 173, + 361 + ], + "spans": [ + { + "bbox": [ + 145, + 353, + 173, + 361 + ], + "type": "text", + "content": "(b) HVSA" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 240, + 156, + 316, + 350 + ], + "blocks": [ + { + "bbox": [ + 240, + 156, + 316, + 350 + ], + "lines": [ + { + "bbox": [ + 240, + 156, + 316, + 350 + ], + "spans": [ + { + "bbox": [ + 240, + 156, + 316, + 350 + ], + "type": "image", + "image_path": "d0ea7d9b2ecaf56aa13a4f1b039a13ff28f10a58d64cc690912ef0f4e806004e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 264, + 351, + 287, + 360 + ], + "lines": [ + { + "bbox": [ + 264, + 351, + 287, + 360 + ], + "spans": [ + { + "bbox": [ + 264, + 351, + 287, + 360 + ], + "type": "text", + "content": "(c) FRB" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 329, + 172, + 397, + 340 + ], + "blocks": [ + { + "bbox": [ + 329, + 172, + 397, + 340 + ], + "lines": [ + { + "bbox": [ + 329, + 172, + 397, + 340 + ], + "spans": [ + { + "bbox": [ + 329, + 172, + 397, + 340 + ], + "type": "image", + "image_path": "b758339bd61eafff580164a4527468d8f542327ebe7a1f14835a8397e41c3165.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": 
"image_body" + }, + { + "bbox": [ + 342, + 351, + 369, + 360 + ], + "lines": [ + { + "bbox": [ + 342, + 351, + 369, + 360 + ], + "spans": [ + { + "bbox": [ + 342, + 351, + 369, + 360 + ], + "type": "text", + "content": "(d) AFFN" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 376, + 555, + 420 + ], + "lines": [ + { + "bbox": [ + 55, + 376, + 555, + 420 + ], + "spans": [ + { + "bbox": [ + 55, + 376, + 555, + 420 + ], + "type": "text", + "content": "Figure 8. Team XUPTBoys: The details of each component. (a) FMDB: Frequency-Guided Multi-level Dispersion Block; (b) HVSA: Hierarchical Variance-guided Spatial Attention; (c) FRB: Frequency-Guided Residual Block; (d) AFFN: Asymmetric FeedForward Network; (e) RCCA: Reallocated Contrast-aware Channel Attention; (f) MRConv-B/MRConv: Multilevel Residual Convolution Basic and Multilevel Residual Convolution" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 408, + 178, + 496, + 334 + ], + "blocks": [ + { + "bbox": [ + 408, + 178, + 496, + 334 + ], + "lines": [ + { + "bbox": [ + 408, + 178, + 496, + 334 + ], + "spans": [ + { + "bbox": [ + 408, + 178, + 496, + 334 + ], + "type": "image", + "image_path": "b48235d2ae7a52f0f87af13e385c348076ea1d6bd0051f75995284647bdfd624.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 424, + 350, + 452, + 357 + ], + "lines": [ + { + "bbox": [ + 424, + 350, + 452, + 357 + ], + "spans": [ + { + "bbox": [ + 424, + 350, + 452, + 357 + ], + "type": "text", + "content": "(e) RCCA" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 442, + 295, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 442, + 295, + 491 + ], + "spans": [ + { + "bbox": [ + 55, + 442, + 295, + 491 + ], + "type": "text", + "content": "MRConv-B, MRConv 
and MRConv-B use convolution kernels of different sizes for parallel convolution, and finally activate the output using GELU and combine it with residual connections, effectively preserving spatial information." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 494, + 296, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 494, + 296, + 542 + ], + "spans": [ + { + "bbox": [ + 55, + 494, + 296, + 542 + ], + "type": "text", + "content": "Training Description. The proposed FMDN has 3 FMDB-Basic blocks and 1 FMDB block, in which the number of feature channels is set to 24. The details of the training steps are as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 545, + 296, + 714 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 56, + 545, + 295, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 545, + 295, + 640 + ], + "spans": [ + { + "bbox": [ + 56, + 545, + 295, + 640 + ], + "type": "text", + "content": "1. Pretraining on the DIV2K [102] and and Flickr2K [70]. HR patches of size " + }, + { + "bbox": [ + 56, + 545, + 295, + 640 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 56, + 545, + 295, + 640 + ], + "type": "text", + "content": " are randomly cropped from HR images, and the mini-batch size is set to 64. The model is trained by minimizing the L1 loss function [77] with the Adam optimizer [53]. The initial learning rate is set to " + }, + { + "bbox": [ + 56, + 545, + 295, + 640 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-3}" + }, + { + "bbox": [ + 56, + 545, + 295, + 640 + ], + "type": "text", + "content": " and halved at " + }, + { + "bbox": [ + 56, + 545, + 295, + 640 + ], + "type": "inline_equation", + "content": "\\{100k, 500k, 800k, 900k, 950k\\}" + }, + { + "bbox": [ + 56, + 545, + 295, + 640 + ], + "type": "text", + "content": "-iteration. 
The total number of iterations is " + }, + { + "bbox": [ + 56, + 545, + 295, + 640 + ], + "type": "inline_equation", + "content": "1000k" + }, + { + "bbox": [ + 56, + 545, + 295, + 640 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "content": "2. Finetuning on 800 images of DIV2K and the first 10k images of LSDIR [64]. HR patch size and mini-batch size are set to " + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "inline_equation", + "content": "384 \\times 384" + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "content": " and 64, respectively. The model is fine-tuned by minimizing L2 loss function [77]. The initial learning rate is set to " + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "content": " and halved at " + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\{500k\\}" + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "content": "-iteration. The total number of iterations is" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 328, + 442, + 358, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 442, + 358, + 453 + ], + "spans": [ + { + "bbox": [ + 328, + 442, + 358, + 453 + ], + "type": "text", + "content": "1000k." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 465, + 389, + 477 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 465, + 389, + 477 + ], + "spans": [ + { + "bbox": [ + 314, + 465, + 389, + 477 + ], + "type": "text", + "content": "4.8. 
HannahSR" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 484, + 555, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 484, + 555, + 628 + ], + "spans": [ + { + "bbox": [ + 313, + 484, + 555, + 628 + ], + "type": "text", + "content": "General Method Description. The architecture of the proposed network is depicted in Fig. 9, which is inspired by previous studies such as AGDN [114], MDRN [80] and SPAN [109]. They propose a Multi-level Refinement and Bias-learnable Attention dual branch Network (MRBAN). More specifically, they build upon the AGDN framework by constructing another branch consisting of one " + }, + { + "bbox": [ + 313, + 484, + 555, + 628 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 313, + 484, + 555, + 628 + ], + "type": "text", + "content": " convolution layer (ISRB) and one " + }, + { + "bbox": [ + 313, + 484, + 555, + 628 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 313, + 484, + 555, + 628 + ], + "type": "text", + "content": " convolution layer to enhance the overall performance in a learnable way. Meanwhile, they replace the concat module in the AGDN with a direct element-wise summation, for the sake of harvesting significant savings of the parameters." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 630, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 630, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 630, + 556, + 715 + ], + "type": "text", + "content": "In addition, they propose the multi-level refinement and bias-learnable attention block (MRBAB) as the basic block of our network. As described in Figure 10, they attempt to minimize the information loss induced by Sigmoid module. 
When confronted with a negative input with a large absolute value, the output of the Sigmoid module will be approximately equal to zero, which results in remarkable" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 72, + 550, + 159 + ], + "blocks": [ + { + "bbox": [ + 60, + 72, + 550, + 159 + ], + "lines": [ + { + "bbox": [ + 60, + 72, + 550, + 159 + ], + "spans": [ + { + "bbox": [ + 60, + 72, + 550, + 159 + ], + "type": "image", + "image_path": "083bbb9f17a8948f4ae5b792fe6c480e191cd46a4ccd5b188e6f518badad2507.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 167, + 555, + 190 + ], + "lines": [ + { + "bbox": [ + 55, + 167, + 555, + 190 + ], + "spans": [ + { + "bbox": [ + 55, + 167, + 555, + 190 + ], + "type": "text", + "content": "Figure 9. Team HannahSR: The overall architecture of Multi-level Refinement and Bias-learnable Attention Dual Branch Network (MR-BAN)." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 98, + 205, + 512, + 319 + ], + "blocks": [ + { + "bbox": [ + 98, + 205, + 512, + 319 + ], + "lines": [ + { + "bbox": [ + 98, + 205, + 512, + 319 + ], + "spans": [ + { + "bbox": [ + 98, + 205, + 512, + 319 + ], + "type": "image", + "image_path": "39e5ebdf23d3857b266c36552ee68e47923ad4ffcfb55aa17c77ee301d9f96b3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 227, + 325, + 383, + 335 + ], + "lines": [ + { + "bbox": [ + 227, + 325, + 383, + 335 + ], + "spans": [ + { + "bbox": [ + 227, + 325, + 383, + 335 + ], + "type": "text", + "content": "(a) Team HannahSR: The MRBAB architecture." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 91, + 345, + 523, + 571 + ], + "blocks": [ + { + "bbox": [ + 91, + 345, + 523, + 571 + ], + "lines": [ + { + "bbox": [ + 91, + 345, + 523, + 571 + ], + "spans": [ + { + "bbox": [ + 91, + 345, + 523, + 571 + ], + "type": "image", + "image_path": "308e52fdb5c90b5da45b454cd7914587960c012e3e6058e86a7865d419d13375.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 186, + 575, + 337, + 585 + ], + "lines": [ + { + "bbox": [ + 186, + 575, + 337, + 585 + ], + "spans": [ + { + "bbox": [ + 186, + 575, + 337, + 585 + ], + "type": "text", + "content": "(b) Team HannahSR: The MRBA architecture." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 599, + 555, + 644 + ], + "lines": [ + { + "bbox": [ + 55, + 599, + 555, + 644 + ], + "spans": [ + { + "bbox": [ + 55, + 599, + 555, + 644 + ], + "type": "text", + "content": "Figure 10. Team HannahSR: The detailed architecture of the network MRBAN. 
(a) MRBAB: Multi-level Refinement and Bias-learnable Attention Block; (b) MRBA: Multi-level Refinement and Bias-learnable Attention; Other components: BSRB: Blueprint Shallow Residual Block [66]; BSConv: Blueprint Separable Convolution [66]; RCCA: Reallocated Contrast-aware Channel Attention [114]; SGSA: Sparse Global Self-attention [114]." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 665, + 297, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 297, + 712 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 297, + 712 + ], + "type": "text", + "content": "information loss. To address this issue, SPAN [109] used an origin-symmetric activation function. They added a bias of " + }, + { + "bbox": [ + 55, + 665, + 297, + 712 + ], + "type": "inline_equation", + "content": "-0.5" + }, + { + "bbox": [ + 55, + 665, + 297, + 712 + ], + "type": "text", + "content": " to the Sigmoid function, which allowed the information carried by negative inputs to be taken into account." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 665, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 556, + 713 + ], + "type": "text", + "content": "However, when dealing with the larger positive inputs, their outputs would be approximately equal to 0.5. When compared with the original 1.0, they inevitably suffered from significant information loss. 
To tackle this issue, they set the" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 295, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 295, + 108 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 295, + 108 + ], + "type": "text", + "content": "negative bias as a learnable parameter so that it can be updated dynamically during the training process to optimally boost the accuracy performance." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 109, + 295, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 109, + 295, + 204 + ], + "spans": [ + { + "bbox": [ + 55, + 109, + 295, + 204 + ], + "type": "text", + "content": "Eventually, they adopt the reparameterization technique. They replace the first " + }, + { + "bbox": [ + 55, + 109, + 295, + 204 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 109, + 295, + 204 + ], + "type": "text", + "content": " convolution layer with identical scale reparameterization block to extract richer local features for supplying the following layers with more valuable information, while standardizing the number of channels to an identical scale for lightweight super resolution networks to prevent incurring inappropriate model capacity increments." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 205, + 296, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 205, + 296, + 252 + ], + "spans": [ + { + "bbox": [ + 55, + 205, + 296, + 252 + ], + "type": "text", + "content": "Training Strategy. The proposed MRBAN consists of 4 MRBAB, and the feature channel is set to 32. They adopt a four-step training strategy. The details of the training steps are as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 254, + 295, + 564 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 52, + 254, + 295, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 254, + 295, + 337 + ], + "spans": [ + { + "bbox": [ + 52, + 254, + 295, + 337 + ], + "type": "text", + "content": "1. Pretraining on the DIV2K [2] and Flickr2K [69] datasets with the patch size of " + }, + { + "bbox": [ + 52, + 254, + 295, + 337 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 52, + 254, + 295, + 337 + ], + "type": "text", + "content": " and the mini-batch size is set to 64. The MRBAN is trained by minimizing the L1 loss function with the Adam optimizer. The initial learning rate is set to " + }, + { + "bbox": [ + 52, + 254, + 295, + 337 + ], + "type": "inline_equation", + "content": "3 \\times 10^{-3}" + }, + { + "bbox": [ + 52, + 254, + 295, + 337 + ], + "type": "text", + "content": " and halved at " + }, + { + "bbox": [ + 52, + 254, + 295, + 337 + ], + "type": "inline_equation", + "content": "\\{100\\mathrm{k}, 500\\mathrm{k}, 800\\mathrm{k}, 900\\mathrm{k}, 950\\mathrm{k}\\}" + }, + { + "bbox": [ + 52, + 254, + 295, + 337 + ], + "type": "text", + "content": "-iteration. The number of iterations is " + }, + { + "bbox": [ + 52, + 254, + 295, + 337 + ], + "type": "inline_equation", + "content": "1000\\mathrm{k}" + }, + { + "bbox": [ + 52, + 254, + 295, + 337 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 338, + 295, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 338, + 295, + 409 + ], + "spans": [ + { + "bbox": [ + 52, + 338, + 295, + 409 + ], + "type": "text", + "content": "2. Initial fine-tuning on DIV2K and the first 10K images of LSDIR [64]. The patch size is " + }, + { + "bbox": [ + 52, + 338, + 295, + 409 + ], + "type": "inline_equation", + "content": "384 \\times 384" + }, + { + "bbox": [ + 52, + 338, + 295, + 409 + ], + "type": "text", + "content": " and the minibatch size is set to 32. The model is trained by minimizing the MSE loss function. The initial learning rate is set to " + }, + { + "bbox": [ + 52, + 338, + 295, + 409 + ], + "type": "inline_equation", + "content": "1.5 \\times 10^{-3}" + }, + { + "bbox": [ + 52, + 338, + 295, + 409 + ], + "type": "text", + "content": " and halved at " + }, + { + "bbox": [ + 52, + 338, + 295, + 409 + ], + "type": "inline_equation", + "content": "\\{100\\mathrm{k}, 500\\mathrm{k}, 800\\mathrm{k}, 900\\mathrm{k}, 950\\mathrm{k}\\}" + }, + { + "bbox": [ + 52, + 338, + 295, + 409 + ], + "type": "text", + "content": "-iteration. The number of iterations is " + }, + { + "bbox": [ + 52, + 338, + 295, + 409 + ], + "type": "inline_equation", + "content": "1000\\mathrm{k}" + }, + { + "bbox": [ + 52, + 338, + 295, + 409 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 410, + 295, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 410, + 295, + 493 + ], + "spans": [ + { + "bbox": [ + 52, + 410, + 295, + 493 + ], + "type": "text", + "content": "3. Advanced training on the DIV2K and the whole LSDIR datasets. The patch size is " + }, + { + "bbox": [ + 52, + 410, + 295, + 493 + ], + "type": "inline_equation", + "content": "384 \\times 384" + }, + { + "bbox": [ + 52, + 410, + 295, + 493 + ], + "type": "text", + "content": " and the mini-batch size is set to 64. 
The model is trained by minimizing the MSE loss function. The initial learning rate is set to " + }, + { + "bbox": [ + 52, + 410, + 295, + 493 + ], + "type": "inline_equation", + "content": "8 \\times 10^{-4}" + }, + { + "bbox": [ + 52, + 410, + 295, + 493 + ], + "type": "text", + "content": " and halved at " + }, + { + "bbox": [ + 52, + 410, + 295, + 493 + ], + "type": "inline_equation", + "content": "\\{100\\mathrm{k}, 500\\mathrm{k}, 800\\mathrm{k}, 900\\mathrm{k}, 950\\mathrm{k}\\}" + }, + { + "bbox": [ + 52, + 410, + 295, + 493 + ], + "type": "text", + "content": "-iteration. The number of iterations is " + }, + { + "bbox": [ + 52, + 410, + 295, + 493 + ], + "type": "inline_equation", + "content": "1000\\mathrm{k}" + }, + { + "bbox": [ + 52, + 410, + 295, + 493 + ], + "type": "text", + "content": ". This stage can be repeated twice." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 494, + 295, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 494, + 295, + 564 + ], + "spans": [ + { + "bbox": [ + 52, + 494, + 295, + 564 + ], + "type": "text", + "content": "4. Final fine-tuning on the DIV2K and the whole LSDIR datasets. The patch size is " + }, + { + "bbox": [ + 52, + 494, + 295, + 564 + ], + "type": "inline_equation", + "content": "448 \\times 448" + }, + { + "bbox": [ + 52, + 494, + 295, + 564 + ], + "type": "text", + "content": " and the mini-batch size is set to 128. The model is trained by minimizing the MSE loss function. 
The initial learning rate is set to " + }, + { + "bbox": [ + 52, + 494, + 295, + 564 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-6}" + }, + { + "bbox": [ + 52, + 494, + 295, + 564 + ], + "type": "text", + "content": " and halved at " + }, + { + "bbox": [ + 52, + 494, + 295, + 564 + ], + "type": "inline_equation", + "content": "\\{100\\mathrm{k}, 500\\mathrm{k}, 800\\mathrm{k}, 900\\mathrm{k}, 950\\mathrm{k}\\}" + }, + { + "bbox": [ + 52, + 494, + 295, + 564 + ], + "type": "text", + "content": "-iteration. The number of iterations is " + }, + { + "bbox": [ + 52, + 494, + 295, + 564 + ], + "type": "inline_equation", + "content": "1000\\mathrm{k}" + }, + { + "bbox": [ + 52, + 494, + 295, + 564 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 575, + 115, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 575, + 115, + 586 + ], + "spans": [ + { + "bbox": [ + 55, + 575, + 115, + 586 + ], + "type": "text", + "content": "4.9. Davinci" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 594, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 296, + 713 + ], + "type": "text", + "content": "Final Solution Description. They chose the Swift Parameter-free Attention Network [112] as their base model, the winner of the NTIRE2024 ESR track. 
After trying the evolution pipeline mentioned in SwinFIR [133], the content decoupling strategy proposed in CoDe [31], the pre-training fine-tuning paradigm, and the model compression techniques such as model pruning and knowledge distillation discussed in Ref [51] respectively, they employ the model Pruning of the last layer with " + }, + { + "bbox": [ + 55, + 594, + 296, + 713 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 55, + 594, + 296, + 713 + ], + "type": "text", + "content": " norm of the baseline and introducing the mixup Augmentation as their final" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 316, + 69, + 556, + 467 + ], + "blocks": [ + { + "bbox": [ + 316, + 69, + 556, + 467 + ], + "lines": [ + { + "bbox": [ + 316, + 69, + 556, + 467 + ], + "spans": [ + { + "bbox": [ + 316, + 69, + 556, + 467 + ], + "type": "image", + "image_path": "de2cac9923bfa4f52967dd4330cfae9d5dfebaa792800e8bb8bb19662aebe5ea.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 472, + 555, + 518 + ], + "lines": [ + { + "bbox": [ + 313, + 472, + 555, + 518 + ], + "spans": [ + { + "bbox": [ + 313, + 472, + 555, + 518 + ], + "type": "text", + "content": "Figure 11. Team Rochester: They reduce the channel dimension from 48 to 28 from the original design and introduce additional convolution to stabilize the attention feature maps from SPAB blocks. Example input and output are adapted from [99]." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 526, + 555, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 526, + 555, + 550 + ], + "spans": [ + { + "bbox": [ + 313, + 526, + 555, + 550 + ], + "type": "text", + "content": "proposal to preserve the original parameter distributions as much as possible, termed PlayerAug." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 552, + 556, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 552, + 556, + 635 + ], + "spans": [ + { + "bbox": [ + 313, + 552, + 556, + 635 + ], + "type": "text", + "content": "Training Details. After pruning the SPAN, they train it on the DIV2K_LSDIR mixed training set, cropping the patch size to 512. The random rotation and flip are configured for data augmentation. The Adam [54] optimizer with " + }, + { + "bbox": [ + 313, + 552, + 556, + 635 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 313, + 552, + 556, + 635 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 552, + 556, + 635 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.99" + }, + { + "bbox": [ + 313, + 552, + 556, + 635 + ], + "type": "text", + "content": " and the L1 loss function are adopted to optimize the models, and the mini-batch size is set to 32. All the experiments are conducted on 8 L40S GPUs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 647, + 390, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 647, + 390, + 658 + ], + "spans": [ + { + "bbox": [ + 313, + 647, + 390, + 658 + ], + "type": "text", + "content": "4.10. Rochester" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 665, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 555, + 715 + ], + "type": "text", + "content": "Method Details. The proposed method, ESRNet, is an improved and more efficient variant of last year's XiaomiMM SPAN network [112]. 
The original SPAN network demonstrated strong generation quality but required" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 312, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 312, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 312, + 742 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "type": "text", + "content": "complex training tricks and model fusion strategies, making it difficult to reproduce and computationally expensive. In contrast, ESRNet achieves similar performance with significantly reduced computational overhead, enhanced training stability, and improved inference speed." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 154, + 294, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 154, + 294, + 201 + ], + "spans": [ + { + "bbox": [ + 55, + 154, + 294, + 201 + ], + "type": "text", + "content": "Model Architecture. A key aspect of ESRNet's design is its ability to maintain high performance while reducing computational costs. As shown in Fig. 
11, their modifications include:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 205, + 295, + 396 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 56, + 205, + 295, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 205, + 295, + 277 + ], + "spans": [ + { + "bbox": [ + 56, + 205, + 295, + 277 + ], + "type": "text", + "content": "- Retaining the first six SPAN attention blocks as core feature extraction components while introducing a lightweight convolutional layer to refine the extracted feature maps before fusing them with the original features. This modification enhances feature representation while stabilizing the training process." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 277, + 295, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 277, + 295, + 348 + ], + "spans": [ + { + "bbox": [ + 56, + 277, + 295, + 348 + ], + "type": "text", + "content": "- Reducing the number of feature channels from 48 to 26, leading to a substantial decrease in both model parameters and floating-point operations (FLOPs). This reduction not only lowers GPU memory consumption but also improves inference efficiency without degrading performance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 350, + 295, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 350, + 295, + 396 + ], + "spans": [ + { + "bbox": [ + 56, + 350, + 295, + 396 + ], + "type": "text", + "content": "- Improved validation speed, as ESRNet requires fewer computations per forward pass, making it more suitable for real-time applications compared with the baseline method." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 401, + 295, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 401, + 295, + 462 + ], + "spans": [ + { + "bbox": [ + 55, + 401, + 295, + 462 + ], + "type": "text", + "content": "Overall, ESRNet has approximately half the number of parameters and FLOPs compared to the baseline EFPN network, yet it maintains a high PSNR score, demonstrating that their modifications achieve an excellent trade-off between efficiency and performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 482, + 295, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 482, + 295, + 555 + ], + "spans": [ + { + "bbox": [ + 55, + 482, + 295, + 555 + ], + "type": "text", + "content": "Training Methodology. They train ESRNet on RGB image patches of size " + }, + { + "bbox": [ + 55, + 482, + 295, + 555 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 482, + 295, + 555 + ], + "type": "text", + "content": ", applying standard augmentation techniques such as random flipping and rotation to enhance generalization. To ensure stable convergence and optimal performance, they adopt a three-stage training strategy:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 558, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 56, + 558, + 295, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 558, + 295, + 617 + ], + "spans": [ + { + "bbox": [ + 56, + 558, + 295, + 617 + ], + "type": "text", + "content": "1. Initial Feature Learning: They train the model with a batch size of 64 using Charbonnier loss, a robust loss function that mitigates the effects of outliers. 
The Adam optimizer is used with an initial learning rate of " + }, + { + "bbox": [ + 56, + 558, + 295, + 617 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-4}" + }, + { + "bbox": [ + 56, + 558, + 295, + 617 + ], + "type": "text", + "content": ", which follows a cosine decay schedule." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 618, + 295, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 618, + 295, + 665 + ], + "spans": [ + { + "bbox": [ + 56, + 618, + 295, + 665 + ], + "type": "text", + "content": "2. Refinement Stage: They progressively decrease the learning rate linearly from " + }, + { + "bbox": [ + 56, + 618, + 295, + 665 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-4}" + }, + { + "bbox": [ + 56, + 618, + 295, + 665 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 56, + 618, + 295, + 665 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-5}" + }, + { + "bbox": [ + 56, + 618, + 295, + 665 + ], + "type": "text", + "content": ", allowing the model to refine its learned features while maintaining stable gradients." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 665, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 665, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 665, + 295, + 713 + ], + "type": "text", + "content": "3. Fine-Tuning with L2 Loss: In the final stage, they adopt L2 loss to fine-tune the model, further enhancing detail restoration. 
The learning rate is further reduced from " + }, + { + "bbox": [ + 56, + 665, + 295, + 713 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-5}" + }, + { + "bbox": [ + 56, + 665, + 295, + 713 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 56, + 665, + 295, + 713 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 56, + 665, + 295, + 713 + ], + "type": "text", + "content": " for smooth convergence." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 72, + 553, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 119 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 119 + ], + "type": "text", + "content": "By structuring the training into these stages, they eliminate the need for complex training tricks used in previous approaches while achieving more stable and reliable optimization." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 121, + 553, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 121, + 553, + 229 + ], + "spans": [ + { + "bbox": [ + 313, + 121, + 553, + 229 + ], + "type": "text", + "content": "One of the most significant advantages of ESRNet is its improved validation time due to its optimized architecture. Compared to the original SPAN network, ESRNet achieves a similar PSNR score while reducing computational complexity. The model requires significantly fewer FLOPs and parameters, leading to a noticeable reduction in inference time and GPU memory usage. This makes ESRNet a practical solution for applications requiring both high-quality generation and efficient computation." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 238, + 368, + 249 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 238, + 368, + 249 + ], + "spans": [ + { + "bbox": [ + 314, + 238, + 368, + 249 + ], + "type": "text", + "content": "4.11.IESR" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 256, + 553, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 256, + 553, + 363 + ], + "spans": [ + { + "bbox": [ + 313, + 256, + 553, + 363 + ], + "type": "text", + "content": "Model Design. As for the Efficint Super-Resolution competition, they proposed the Inference Efficient Super-Resolution Net (IESRNet). IESRNet is not a specific network, but a bag of tricks to make a Super-Resolution Network infer more Efficient on a GPU. They will apply these tricks based on DIPNet [128], which won the first place on the NTIRE2023 ESR challenge in runtime track [65]. The specific structure of IESRNet is shown in Fig. 12. They will describe the tricks they used in detail below." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 365, + 553, + 712 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 313, + 365, + 553, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 365, + 553, + 435 + ], + "spans": [ + { + "bbox": [ + 313, + 365, + 553, + 435 + ], + "type": "text", + "content": "1. Remove bias in Conv. The bias add of the convolution is a relatively inefficient operation in the convolution layer. It only occupies a small part of the FLOPs in the convolution, but occupies " + }, + { + "bbox": [ + 313, + 365, + 553, + 435 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 313, + 365, + 553, + 435 + ], + "type": "text", + "content": " or more of the runtime. They removed the bias of all convolutional layers except the ESA module, and the PSNR loss was less than 0.01db." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "spans": [ + { + "bbox": [ + 313, + 437, + 553, + 520 + ], + "type": "text", + "content": "2. Less Residual Connection. Although residual connection helps the model converge during training, too many residual structures will introduce many additional operations, reducing the inference efficiency of the model. Therefore, they replace the two middle RRFB in DIPNet with reparameterization no residual block(RNRB) to balance the trade-off between inference efficiency and model accuracy." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 521, + 553, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 521, + 553, + 640 + ], + "spans": [ + { + "bbox": [ + 313, + 521, + 553, + 640 + ], + "type": "text", + "content": "3. Standard number of Conv channels. Since the convolution operator has different performance optimizations for different configurations, generally, convolutions with a standard number of channels (such as 32, 48, and 64) are more deeply optimized and therefore occupy higher inference efficiency on the GPU. Based on NVIDIA V100 GPU testing, a 48-channel " + }, + { + "bbox": [ + 313, + 521, + 553, + 640 + ], + "type": "inline_equation", + "content": "3^{*}3" + }, + { + "bbox": [ + 313, + 521, + 553, + 640 + ], + "type": "text", + "content": " convolution is even faster than a 30-channel convolution, although the FLOPs is over doubled. For this reason, they set the number of feature channels to 32, and the number of ESA channels to 16." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 642, + 553, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 642, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 313, + 642, + 553, + 712 + ], + "type": "text", + "content": "4. Efficient activation function. 
They replace all activation functions in the network with SiLU [27], which performs well in super-resolution tasks and significantly outperforms the RELU. In addition to its great performance, SiLU is also very fast when inferring on GPUs due to its computational characteristics." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 68, + 558, + 365 + ], + "blocks": [ + { + "bbox": [ + 55, + 68, + 558, + 365 + ], + "lines": [ + { + "bbox": [ + 55, + 68, + 558, + 365 + ], + "spans": [ + { + "bbox": [ + 55, + 68, + 558, + 365 + ], + "type": "image", + "image_path": "f7a459c9d7c6eda2acf426998d26a40c8bebbe250c1ca7ffe0217eb5634b2e71.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 373, + 504, + 385 + ], + "lines": [ + { + "bbox": [ + 105, + 373, + 504, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 504, + 385 + ], + "type": "text", + "content": "Figure 12. Team IRSR: The overview of the proposed IESRNet. The IESRNet is built based on DIPNet [128]." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 406, + 294, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 406, + 294, + 490 + ], + "spans": [ + { + "bbox": [ + 54, + 406, + 294, + 490 + ], + "type": "text", + "content": "5. Reparameterization. They adopt re-parameterization to enhance the representation capabilities of the model. 
They use complex re-parameterization structures to train during training and merge them into regular convolutions during inference without incurring additional computational overhead. The specific rep-structure is shown in Fig. 12(c)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 491, + 295, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 491, + 295, + 563 + ], + "spans": [ + { + "bbox": [ + 54, + 491, + 295, + 563 + ], + "type": "text", + "content": "Implementation Details. The training dataset consists of DIV2K and the first 15,000 images of LSIDR [64]. Random flipping and rotation are adopt for Data Augmentation. They adopt a multi-stage training paradigm to train their super-resolution network. The details of training steps are as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 566, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 55, + 566, + 295, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 566, + 295, + 637 + ], + "spans": [ + { + "bbox": [ + 55, + 566, + 295, + 637 + ], + "type": "text", + "content": "1. Initial training: HR patches of size " + }, + { + "bbox": [ + 55, + 566, + 295, + 637 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 566, + 295, + 637 + ], + "type": "text", + "content": " are randomly cropped from HR images. They set the mini-batch as 128. The model is trained by minimizing the PSNR loss with the Adam optimizer. The initial learning rate is set to 5e-4, and halved per 200k iterations. The total number of iterations is 1000k." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 639, + 295, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 639, + 295, + 664 + ], + "spans": [ + { + "bbox": [ + 55, + 639, + 295, + 664 + ], + "type": "text", + "content": "2. 
Warm-Start Training: Load the pre-trained weight and train it three times with the same setting." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 666, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 666, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 666, + 295, + 713 + ], + "type": "text", + "content": "3. Finetune with increasing patch size: In this process, the training patch size is progressively increased to improve the performance, which is selected from [384, 512, 640]. For each patch size, they finetune the network with " + }, + { + "bbox": [ + 55, + 666, + 295, + 713 + ], + "type": "inline_equation", + "content": "1000\\mathrm{k}" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 406, + 555, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 406, + 555, + 453 + ], + "spans": [ + { + "bbox": [ + 313, + 406, + 555, + 453 + ], + "type": "text", + "content": "iterations. And the initial learning rate is correspondingly selected from [2e-4, 1e-4, 5e-5]. The batch size decreases to 64 for saving GPU memory. All experiments are conducted on 8 NVIDIA V100 GPUs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 465, + 365, + 477 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 465, + 365, + 477 + ], + "spans": [ + { + "bbox": [ + 313, + 465, + 365, + 477 + ], + "type": "text", + "content": "4.12. ASR" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 484, + 554, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 484, + 554, + 544 + ], + "spans": [ + { + "bbox": [ + 313, + 484, + 554, + 544 + ], + "type": "text", + "content": "Model Design. The network architecture is built based on DIPNet [128], which won the first place on the NTIRE2023 ESR challenge runtime track [65]. 
They made several modifications to make it more efficient while maintaining the excellent performance. They call it DIPNetSlim." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 546, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 546, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 546, + 555, + 713 + ], + "type": "text", + "content": "First of all, they did not use pruning as DIPNet dose. Although it can decrease the model parameters, it will degrade the inference speed of the model due to the irregular number of convolution channels. These operator configurations are not deeply optimized. For this reason, they set the number of feature channels to 32, and the number of ESA channels to 16. Second, they re-parameterize all 3x3 convolutional layers in the network. They adopt re-parameterization to enhance the expressiveness of the model. They use complex re-parameterization structures to train during training and merge them into regular convolutions during inference without incurring additional infer overhead. In addition, they changed the last convolution before the residual connection from 3x3 to 1x1, saving parameters while retain-" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "type": "text", + "content": "ing the ability of feature normalization. 
Finally, they replace all activation functions in the network with SiLU [27], which performs well in super-resolution tasks and significantly outperforms the RELU." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 121, + 295, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 121, + 295, + 156 + ], + "spans": [ + { + "bbox": [ + 55, + 121, + 295, + 156 + ], + "type": "text", + "content": "Implementation Details. The training dataset consists of DIV2K [103] and the first 15,000 images of LSIDR. The details of training steps are as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 158, + 294, + 338 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 55, + 158, + 294, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 158, + 294, + 228 + ], + "spans": [ + { + "bbox": [ + 55, + 158, + 294, + 228 + ], + "type": "text", + "content": "1. Initial Training: HR patches of size " + }, + { + "bbox": [ + 55, + 158, + 294, + 228 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 158, + 294, + 228 + ], + "type": "text", + "content": " are randomly cropped from HR images. They set the mini-batch as 128. The model is trained by minimizing the PSNR loss with the Adam optimizer. The initial learning rate is set to 5e-4, and halved per 200k iterations. The total number of iterations is 1000k." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 230, + 294, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 230, + 294, + 253 + ], + "spans": [ + { + "bbox": [ + 55, + 230, + 294, + 253 + ], + "type": "text", + "content": "2. Warm-Start Training: Load the pre-trained weight and train it three times with the same setting." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 255, + 294, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 255, + 294, + 338 + ], + "spans": [ + { + "bbox": [ + 55, + 255, + 294, + 338 + ], + "type": "text", + "content": "3. Finetune with increasing patch size: In this process, the training patch size is progressively increased to improve the performance, which is selected from [384, 512, 640]. For each patch size, they finetune the network with " + }, + { + "bbox": [ + 55, + 255, + 294, + 338 + ], + "type": "inline_equation", + "content": "1000k" + }, + { + "bbox": [ + 55, + 255, + 294, + 338 + ], + "type": "text", + "content": " iterations. And the initial learning rate is correspondingly selected from [2e-4, 1e-4, 5e-4]. The batch size decreases to 64 for saving GPU memory." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 347, + 127, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 347, + 127, + 358 + ], + "spans": [ + { + "bbox": [ + 55, + 347, + 127, + 358 + ], + "type": "text", + "content": "4.13. VPEG_O" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 54, + 365, + 295, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 365, + 295, + 519 + ], + "spans": [ + { + "bbox": [ + 54, + 365, + 295, + 519 + ], + "type": "text", + "content": "General Method Description. They introduce SAFMnV3, an enhanced version of SAFMN [96] for solving real-time image SR. This solution is mainly concentrates on improving the effectiveness of the spatially-adaptive feature modulation (SAFM) [96] layer. Different from the original SAFMN, as shown in Fig 13, the simplified SAFM layer is able to extract both local and non-local features simultaneously without channel splitting. 
Within this module, they use two " + }, + { + "bbox": [ + 54, + 365, + 295, + 519 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 54, + 365, + 295, + 519 + ], + "type": "text", + "content": " convolutions to project the input and use variance-constrained feature modulation operator [144] in branches with fewer channels, and finally aggregate these two parts of the feature, then refine the aggregated features via a feed-forward neural network." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 54, + 521, + 295, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 521, + 295, + 593 + ], + "spans": [ + { + "bbox": [ + 54, + 521, + 295, + 593 + ], + "type": "text", + "content": "Training Description. The proposed SAFMNv3 consists of 6 feature mixing modules, and the number of channels is set to 40. They rain the network on RGB channels and augment the training data with random flipping and rotation. Following previous methods, the training process is divided into three stages:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 594, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 52, + 594, + 295, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 594, + 295, + 677 + ], + "spans": [ + { + "bbox": [ + 52, + 594, + 295, + 677 + ], + "type": "text", + "content": "1. In the first stage, they randomly crop " + }, + { + "bbox": [ + 52, + 594, + 295, + 677 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 52, + 594, + 295, + 677 + ], + "type": "text", + "content": " HR image patches from the selected LSIDR [64] dataset, with a batch size of 64. The proposed SAFMNv3 is trained by minimizing L1 loss and the frequency loss[14] with Adam optimizer for total 800, 000 iterations. The initial learning rate is set to 2e-3, with a Cosine Annealing scheme [78]." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 677, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 677, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 52, + 677, + 295, + 713 + ], + "type": "text", + "content": "2. In the second stage, they increase the size of the HR image patches to " + }, + { + "bbox": [ + 52, + 677, + 295, + 713 + ], + "type": "inline_equation", + "content": "384 \\times 384" + }, + { + "bbox": [ + 52, + 677, + 295, + 713 + ], + "type": "text", + "content": ". The model is fine-tuned on the DF2K [100] by minimizing Charbonnier loss function." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 322, + 72, + 553, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 72, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 322, + 72, + 553, + 95 + ], + "type": "text", + "content": "The initial learning rate is set to 5e-4, and the total iterations is " + }, + { + "bbox": [ + 322, + 72, + 553, + 95 + ], + "type": "inline_equation", + "content": "500\\mathrm{k}" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 310, + 96, + 553, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 96, + 553, + 133 + ], + "spans": [ + { + "bbox": [ + 310, + 96, + 553, + 133 + ], + "type": "text", + "content": "3. In the third stage, the batch size is set to 64, and PSNR loss is adopted to optimize over " + }, + { + "bbox": [ + 310, + 96, + 553, + 133 + ], + "type": "inline_equation", + "content": "300\\mathrm{k}" + }, + { + "bbox": [ + 310, + 96, + 553, + 133 + ], + "type": "text", + "content": " iterations. The initial learning rate is set to 5e-5." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 135, + 553, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 135, + 553, + 171 + ], + "spans": [ + { + "bbox": [ + 313, + 135, + 553, + 171 + ], + "type": "text", + "content": "Throughout the training process, they also employ an Exponential Moving Average (EMA) strategy to enhance the robustness of training." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 187, + 375, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 187, + 375, + 198 + ], + "spans": [ + { + "bbox": [ + 313, + 187, + 375, + 198 + ], + "type": "text", + "content": "4.14.mmSR" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 208, + 553, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 208, + 553, + 530 + ], + "spans": [ + { + "bbox": [ + 313, + 208, + 553, + 530 + ], + "type": "text", + "content": "Method. They improve the model based on SAFMN++ [91] and name it FAnet as shown in Fig. 14. Compared to SAFMN++, their model achieves a higher PSNR with a lower computational cost. Unlike the original SAFMN++ method, they introduce modifications in both the data and model structure. In terms of model structure, as shown in the figure, they improve the Feature Mixing Module of the original architecture and incorporate the concept of reparameterization, designing the RFMM. They modify the convolutional extraction network preceding the original module into a parallel structure to accommodate multi-granularity feature extraction and apply re-parameterization [23] during inference. Furthermore, they adjust the downsampling factor in SimpleSAFM to 16 to achieve lower computational complexity. Regarding the data, in addition to utilizing the provided training dataset, they analyze the superresolution results of the model and identify common issues in fine-detail generation. 
Given constraints on model parameters and computational resources, it is impractical for a lightweight model to generate details identical to the ground truth. Therefore, they shift their focus to expanding the training dataset. Specifically, they use 10,800 images from the training dataset as input and employ convolutional neural networks such as Omni-SR [113] to generate new images. This additional data is incorporated into the training process to facilitate learning and mitigate the risk of learning bias caused by excessive learning difficulty." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 534, + 555, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 534, + 555, + 712 + ], + "spans": [ + { + "bbox": [ + 313, + 534, + 555, + 712 + ], + "type": "text", + "content": "Training Details. They train their model on the DIV2K [100], Flickr2K [70], and LSDIR [64] datasets. The cropped low-resolution (LR) image size is set to 64 × 64 and subjected to random flipping and rotation. The FAnet model is optimized using the Adam optimizer with L1 loss minimization in a multi-stage training scheme. During the training phase, they set the initial learning rate to " + }, + { + "bbox": [ + 313, + 534, + 555, + 712 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-3}" + }, + { + "bbox": [ + 313, + 534, + 555, + 712 + ], + "type": "text", + "content": " and the minimum learning rate to " + }, + { + "bbox": [ + 313, + 534, + 555, + 712 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 313, + 534, + 555, + 712 + ], + "type": "text", + "content": ", training for 500,000 iterations with a mini-batch size of 512. In finetuning stage, Initialized with training phase weights, they fine-tune the model with the given training dataset and additional dataset which is proposed as above. 
They finetune the model using a learning rate of " + }, + { + "bbox": [ + 313, + 534, + 555, + 712 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 313, + 534, + 555, + 712 + ], + "type": "text", + "content": " and the minimum learning rate set to " + }, + { + "bbox": [ + 313, + 534, + 555, + 712 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 313, + 534, + 555, + 712 + ], + "type": "text", + "content": ", with a mini-batch size of 64." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 73, + 550, + 175 + ], + "blocks": [ + { + "bbox": [ + 61, + 73, + 550, + 175 + ], + "lines": [ + { + "bbox": [ + 61, + 73, + 550, + 175 + ], + "spans": [ + { + "bbox": [ + 61, + 73, + 550, + 175 + ], + "type": "image", + "image_path": "8cf587e3cbc1927fbca4656b8736d9f84ac6f220c9227c1ad73401744af36b10.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 188, + 430, + 200 + ], + "lines": [ + { + "bbox": [ + 179, + 188, + 430, + 200 + ], + "spans": [ + { + "bbox": [ + 179, + 188, + 430, + 200 + ], + "type": "text", + "content": "Figure 13. Team VPEG_O: An overview of the proposed SAFMNv3." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 57, + 217, + 294, + 358 + ], + "blocks": [ + { + "bbox": [ + 57, + 217, + 294, + 358 + ], + "lines": [ + { + "bbox": [ + 57, + 217, + 294, + 358 + ], + "spans": [ + { + "bbox": [ + 57, + 217, + 294, + 358 + ], + "type": "image", + "image_path": "9bbcbdf88644d05c0209ff8adeee4dc89fcb240fb6ca41121a1750176f9fa5bd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 365, + 296, + 388 + ], + "lines": [ + { + "bbox": [ + 55, + 365, + 296, + 388 + ], + "spans": [ + { + "bbox": [ + 55, + 365, + 296, + 388 + ], + "type": "text", + "content": "Figure 14. Team mmSR: The overall network architecture of FAnet." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 410, + 124, + 422 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 410, + 124, + 422 + ], + "spans": [ + { + "bbox": [ + 55, + 410, + 124, + 422 + ], + "type": "text", + "content": "4.15. ChanSR" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 428, + 296, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 428, + 296, + 524 + ], + "spans": [ + { + "bbox": [ + 55, + 428, + 296, + 524 + ], + "type": "text", + "content": "General Method Description. They propose the Edge Enhanced Convolutional Network (EECNet) for the efficient super-resolution task. The network architecture is inspired by the design of SRN [118], while fully exploring the capacity of reparameterizable convolution. The whole architecture is shown in Fig. 15(a). 
They introduce a predefined High-Pass Filter (HPF) branch to explicitly capture edge details, formulated as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 110, + 535, + 295, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 535, + 295, + 574 + ], + "spans": [ + { + "bbox": [ + 110, + 535, + 295, + 574 + ], + "type": "interline_equation", + "content": "\\mathbf {K} _ {h p f} = \\frac {1}{1 6} \\left[ \\begin{array}{r r r} - 1 & - 2 & - 1 \\\\ - 2 & 1 2 & - 2 \\\\ - 1 & - 2 & - 1 \\end{array} \\right]. \\tag {8}", + "image_path": "f3d66744d65647250f814ecd1fd97932c61509acc985e03178f1944b07d12c62.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 581, + 295, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 581, + 295, + 688 + ], + "spans": [ + { + "bbox": [ + 55, + 581, + 295, + 688 + ], + "type": "text", + "content": "Then they integrate the proposed HPF into the EDBB [116], creating the subEEC module. As subEEC can be mathematically equivalent to a standard " + }, + { + "bbox": [ + 55, + 581, + 295, + 688 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 581, + 295, + 688 + ], + "type": "text", + "content": " convolution, they replace the original " + }, + { + "bbox": [ + 55, + 581, + 295, + 688 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 581, + 295, + 688 + ], + "type": "text", + "content": " convolution in RRRB [25] with our subEEC to obtain the final EEC architecture, whose structure is shown in Fig. 15(b). Notably, to ensure valid re-parameterization, they initialize the bias of the first convolution layer as zero to compensate for the zeropadding operation in subEEC." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "content": "To better capture global spatial information, they adopt the simplified Efficient Spatial Attention mechanism from" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 220, + 553, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 220, + 553, + 280 + ], + "spans": [ + { + "bbox": [ + 313, + 220, + 553, + 280 + ], + "type": "text", + "content": "SRN [118], whose structure is shown in Fig. 15(c). Compared with the original ESA, this implementation removes the " + }, + { + "bbox": [ + 313, + 220, + 553, + 280 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 313, + 220, + 553, + 280 + ], + "type": "text", + "content": " convolution layer and reduces computational complexity by employing only a single " + }, + { + "bbox": [ + 313, + 220, + 553, + 280 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 313, + 220, + 553, + 280 + ], + "type": "text", + "content": " convolution in the convolutional group." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 281, + 555, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 281, + 555, + 422 + ], + "spans": [ + { + "bbox": [ + 313, + 281, + 555, + 422 + ], + "type": "text", + "content": "Training Description. The proposed EECNet contains eight EEBs, in which they set the number of feature maps to 32. Also, the channel number of the ESA is set to 16 similar to [56]. 
Throughout the entire training process, they use the Adam optimizer [54], where " + }, + { + "bbox": [ + 313, + 281, + 555, + 422 + ], + "type": "inline_equation", + "content": "\\beta 1 = 0.9" + }, + { + "bbox": [ + 313, + 281, + 555, + 422 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 281, + 555, + 422 + ], + "type": "inline_equation", + "content": "\\beta 2 = 0.999" + }, + { + "bbox": [ + 313, + 281, + 555, + 422 + ], + "type": "text", + "content": ". The model is trained for " + }, + { + "bbox": [ + 313, + 281, + 555, + 422 + ], + "type": "inline_equation", + "content": "1000k" + }, + { + "bbox": [ + 313, + 281, + 555, + 422 + ], + "type": "text", + "content": " iterations in each stage. Input patches are randomly cropped and augmented. Data augmentation strategies included horizontal and vertical flips, and random rotations of 90, 180, and 270 degrees. Model training was performed using Pytorch 1.12.0 [85] on RTX 3090. Specifically, the training strategy consists of several steps as follows." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 424, + 554, + 640 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 313, + 424, + 554, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 424, + 554, + 519 + ], + "spans": [ + { + "bbox": [ + 313, + 424, + 554, + 519 + ], + "type": "text", + "content": "1. In the starting stage, they train the model from scratch on the 800 images of DIV2K [4] and the first 10k images of LSDIR [64] datasets. The model is trained for a total " + }, + { + "bbox": [ + 313, + 424, + 554, + 519 + ], + "type": "inline_equation", + "content": "10^{6}" + }, + { + "bbox": [ + 313, + 424, + 554, + 519 + ], + "type": "text", + "content": " iterations by minimizing L1 loss and FFT loss [15]. 
The HR patch size is set to " + }, + { + "bbox": [ + 313, + 424, + 554, + 519 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 424, + 554, + 519 + ], + "type": "text", + "content": ", while the mini-batch size is set to 64. They set the initial learning rate to " + }, + { + "bbox": [ + 313, + 424, + 554, + 519 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-3}" + }, + { + "bbox": [ + 313, + 424, + 554, + 519 + ], + "type": "text", + "content": " and the minimum one to " + }, + { + "bbox": [ + 313, + 424, + 554, + 519 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 313, + 424, + 554, + 519 + ], + "type": "text", + "content": ", which is updated by the Cosine Annealing scheme." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 520, + 553, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 520, + 553, + 591 + ], + "spans": [ + { + "bbox": [ + 313, + 520, + 553, + 591 + ], + "type": "text", + "content": "2. In the second stage, they increase the HR patch size to 384, while the mini-batch size is set to 32. The model is fine-tuned by minimizing the L1 loss and the FFT loss. They set the initial learning rate to " + }, + { + "bbox": [ + 313, + 520, + 553, + 591 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 313, + 520, + 553, + 591 + ], + "type": "text", + "content": " and the minimum one to " + }, + { + "bbox": [ + 313, + 520, + 553, + 591 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 313, + 520, + 553, + 591 + ], + "type": "text", + "content": ", which is updated by the Cosine Annealing scheme." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 592, + 553, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 592, + 553, + 640 + ], + "spans": [ + { + "bbox": [ + 313, + 592, + 553, + 640 + ], + "type": "text", + "content": "3. In the last stage, the model is fine-tuned with " + }, + { + "bbox": [ + 313, + 592, + 553, + 640 + ], + "type": "inline_equation", + "content": "480 \\times 480" + }, + { + "bbox": [ + 313, + 592, + 553, + 640 + ], + "type": "text", + "content": " HR patches, however, the loss function is changed to minimize the combination of L2 loss and FFT loss [15]. Other settings are the same as Stage 2." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 648, + 421, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 648, + 421, + 659 + ], + "spans": [ + { + "bbox": [ + 313, + 648, + 421, + 659 + ], + "type": "text", + "content": "4.16. Pixel Alchemists" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "type": "text", + "content": "Network Architecture. The overall architecture of team Pixel Alchemists is shown in Fig. 16. They propose a novel architecture named resolution-consistent UNet (RCUNet). 
The proposed network consists of four deep feature comple" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 69, + 541, + 157 + ], + "blocks": [ + { + "bbox": [ + 69, + 69, + 541, + 157 + ], + "lines": [ + { + "bbox": [ + 69, + 69, + 541, + 157 + ], + "spans": [ + { + "bbox": [ + 69, + 69, + 541, + 157 + ], + "type": "image", + "image_path": "e9e1cda4695875e0b22a2ee15705c87b10f30d2b2a898093416b94ff467341a4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 69, + 160, + 541, + 305 + ], + "blocks": [ + { + "bbox": [ + 69, + 160, + 541, + 305 + ], + "lines": [ + { + "bbox": [ + 69, + 160, + 541, + 305 + ], + "spans": [ + { + "bbox": [ + 69, + 160, + 541, + 305 + ], + "type": "image", + "image_path": "d48024e2b80356cc2faa2c14f1af7be0760ff89822241d890034e1dceb40e2d0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 188, + 315, + 420, + 326 + ], + "lines": [ + { + "bbox": [ + 188, + 315, + 420, + 326 + ], + "spans": [ + { + "bbox": [ + 188, + 315, + 420, + 326 + ], + "type": "text", + "content": "Figure 15. Team ChanSR: Network architecture of the EECNet." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 54, + 347, + 295, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 347, + 295, + 443 + ], + "spans": [ + { + "bbox": [ + 54, + 347, + 295, + 443 + ], + "type": "text", + "content": "ment and distillation blocks (DFCDB). 
Inspired by [35, 83], the input feature map is split along the channel dimension in each block. Then, four convolutional layers process one of the split feature maps to generate complementary features. The input features and complementary features are concatenated to avoid loss of input information and distilled by a conv-1 layer. Besides, the output feature map of DFCDB is further enhanced by the ESA layer [55]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 445, + 295, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 445, + 295, + 576 + ], + "spans": [ + { + "bbox": [ + 54, + 445, + 295, + 576 + ], + "type": "text", + "content": "Online Convolutional Re-parameterization. Reparameterization [136] has improved the performance of image restoration models without introducing any inference cost. However, the training cost is large because of complicated training-time blocks. To reduce the large extra training cost, online convolutional re-parameterization [41] is employed by converting the complex blocks into a single convolutional layer during the training stage. The architecture of RepConv is shown in Fig. 17. It can be converted to a " + }, + { + "bbox": [ + 54, + 445, + 295, + 576 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 54, + 445, + 295, + 576 + ], + "type": "text", + "content": " convolution during training, which saves the training cost." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 577, + 295, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 577, + 295, + 611 + ], + "spans": [ + { + "bbox": [ + 55, + 577, + 295, + 611 + ], + "type": "text", + "content": "Training Details. The proposed RCUNet has four DFCDBs. The number of features is set to 48, and the number of ESA channels is set to 16." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 613, + 295, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 613, + 295, + 638 + ], + "spans": [ + { + "bbox": [ + 55, + 613, + 295, + 638 + ], + "type": "text", + "content": "DIV2K [4] and LSDIR [64] datasets are used for training. The training details are as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 641, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 641, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 53, + 641, + 295, + 713 + ], + "type": "text", + "content": "1. The model is first trained from scratch with " + }, + { + "bbox": [ + 53, + 641, + 295, + 713 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 53, + 641, + 295, + 713 + ], + "type": "text", + "content": " patches randomly cropped from HR images from the DIV2K and LSDIR datasets. The mini-batch size is set to 64. The L1 loss and pyramid loss are minimized with the Adam optimizer. The initial learning rate is set to 1e-3 with a cosine annealing schedule. The total number of" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 322, + 347, + 402, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 347, + 402, + 358 + ], + "spans": [ + { + "bbox": [ + 322, + 347, + 402, + 358 + ], + "type": "text", + "content": "iterations is 1000k." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 358, + 553, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 358, + 553, + 407 + ], + "spans": [ + { + "bbox": [ + 313, + 358, + 553, + 407 + ], + "type": "text", + "content": "2 Then the model is initialized with the pre-trained weights of Stage 1. 
The MSE loss and pyramid loss is used for fine-tuning with " + }, + { + "bbox": [ + 313, + 358, + 553, + 407 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 313, + 358, + 553, + 407 + ], + "type": "text", + "content": " HR patches and a learning rate of 1e-5 for 500k iterations." + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 332, + 418, + 538, + 588 + ], + "blocks": [ + { + "bbox": [ + 332, + 418, + 538, + 588 + ], + "lines": [ + { + "bbox": [ + 332, + 418, + 538, + 588 + ], + "spans": [ + { + "bbox": [ + 332, + 418, + 538, + 588 + ], + "type": "image", + "image_path": "aeb111111f6a66fd1e33711c57d02e8b37f987757aecc478ba83f7f117f8f563.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 328, + 602, + 539, + 613 + ], + "lines": [ + { + "bbox": [ + 328, + 602, + 539, + 613 + ], + "spans": [ + { + "bbox": [ + 328, + 602, + 539, + 613 + ], + "type": "text", + "content": "Figure 16. Team Pixel Alchemists: RCUNet Architecture." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 635, + 358, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 635, + 358, + 647 + ], + "spans": [ + { + "bbox": [ + 313, + 635, + 358, + 647 + ], + "type": "text", + "content": "4.17.LZ" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 653, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 653, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 653, + 553, + 713 + ], + "type": "text", + "content": "General Method Description. To enhance model complexity without increasing computational overhead, they focus on designing structurally simple yet expressively powerful components, notably through re-parameterization techniques. 
Drawing inspiration from ECBSR [137]," + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 80, + 76, + 276, + 178 + ], + "blocks": [ + { + "bbox": [ + 80, + 76, + 276, + 178 + ], + "lines": [ + { + "bbox": [ + 80, + 76, + 276, + 178 + ], + "spans": [ + { + "bbox": [ + 80, + 76, + 276, + 178 + ], + "type": "image", + "image_path": "e2f754ff416b95f767e85bf2241846e8853135980f1a33ca98e7b3b2dd78f4f5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 143, + 186, + 227, + 194 + ], + "lines": [ + { + "bbox": [ + 143, + 186, + 227, + 194 + ], + "spans": [ + { + "bbox": [ + 143, + 186, + 227, + 194 + ], + "type": "text", + "content": "(a) Online Reparameterization" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 63, + 264, + 265, + 367 + ], + "blocks": [ + { + "bbox": [ + 60, + 209, + 290, + 220 + ], + "lines": [ + { + "bbox": [ + 60, + 209, + 290, + 220 + ], + "spans": [ + { + "bbox": [ + 60, + 209, + 290, + 220 + ], + "type": "text", + "content": "Figure 17. Team Pixel Alchemists: Online re-parameterization." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 63, + 264, + 265, + 367 + ], + "lines": [ + { + "bbox": [ + 63, + 264, + 265, + 367 + ], + "spans": [ + { + "bbox": [ + 63, + 264, + 265, + 367 + ], + "type": "image", + "image_path": "65bfab3183f17ccd152cdcd70375e893a18feaf358a53ada5a84bdc2975a7327.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 375, + 272, + 386 + ], + "lines": [ + { + "bbox": [ + 77, + 375, + 272, + 386 + ], + "spans": [ + { + "bbox": [ + 77, + 375, + 272, + 386 + ], + "type": "text", + "content": "Figure 18. Team LZ: Detailed architecture of TDESR." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 409, + 295, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 409, + 295, + 493 + ], + "spans": [ + { + "bbox": [ + 55, + 409, + 295, + 493 + ], + "type": "text", + "content": "their TDESR framework strategically implements reparameterization to improve super-resolution performance while preserving training efficiency. Following the reparameterization phase, they employ tensor decomposition for light-weight network design, where standard " + }, + { + "bbox": [ + 55, + 409, + 295, + 493 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 409, + 295, + 493 + ], + "type": "text", + "content": " convolutions are factorized into sequential " + }, + { + "bbox": [ + 55, + 409, + 295, + 493 + ], + "type": "inline_equation", + "content": "3 \\times 1" + }, + { + "bbox": [ + 55, + 409, + 295, + 493 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 409, + 295, + 493 + ], + "type": "inline_equation", + "content": "1 \\times 3" + }, + { + "bbox": [ + 55, + 409, + 295, + 493 + ], + "type": "text", + "content": " convolutional operations." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 494, + 295, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 494, + 295, + 649 + ], + "spans": [ + { + "bbox": [ + 55, + 494, + 295, + 649 + ], + "type": "text", + "content": "As illustrated in Fig. 18, their architecture comprises five TD Blocks interspersed with three standard " + }, + { + "bbox": [ + 55, + 494, + 295, + 649 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 494, + 295, + 649 + ], + "type": "text", + "content": " convolutions, implementing a skip connection through elementwise addition between the input features (processed by a " + }, + { + "bbox": [ + 55, + 494, + 295, + 649 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 494, + 295, + 649 + ], + "type": "text", + "content": " convolution) and intermediate feature maps. The network maintains 64 channels throughout, with tensor decomposition intermediate channels reduced to 32 for computational efficiency. They integrate insights from Swift-SR's parameter-free attention mechanism [112] to enhance feature representation. The final reconstruction stage employs PixelShuffle with 48 input channels for high-quality image upsampling, completing their balanced design of performance and efficiency." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 650, + 295, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 650, + 295, + 673 + ], + "spans": [ + { + "bbox": [ + 55, + 650, + 295, + 673 + ], + "type": "text", + "content": "Training Details. The training details of team LZ are as follows." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 677, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 677, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 56, + 677, + 295, + 714 + ], + "type": "text", + "content": "- Base Training (" + }, + { + "bbox": [ + 56, + 677, + 295, + 714 + ], + "type": "inline_equation", + "content": "\\times 2" + }, + { + "bbox": [ + 56, + 677, + 295, + 714 + ], + "type": "text", + "content": " upscaling) The model is initially trained for " + }, + { + "bbox": [ + 56, + 677, + 295, + 714 + ], + "type": "inline_equation", + "content": "\\times 2" + }, + { + "bbox": [ + 56, + 677, + 295, + 714 + ], + "type": "text", + "content": " super-resolution using randomly cropped " + }, + { + "bbox": [ + 56, + 677, + 295, + 714 + ], + "type": "inline_equation", + "content": "96 \\times 96" + }, + { + "bbox": [ + 56, + 677, + 295, + 714 + ], + "type": "text", + "content": " HR patches with a batch size of 32. They employ" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 320, + 69, + 550, + 131 + ], + "blocks": [ + { + "bbox": [ + 320, + 69, + 550, + 131 + ], + "lines": [ + { + "bbox": [ + 320, + 69, + 550, + 131 + ], + "spans": [ + { + "bbox": [ + 320, + 69, + 550, + 131 + ], + "type": "image", + "image_path": "449a23221e1f675fa538f3ab016b13b78bd4d647f4c1a5ea675c158ab5a86d85.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 139, + 537, + 150 + ], + "lines": [ + { + "bbox": [ + 331, + 139, + 537, + 150 + ], + "spans": [ + { + "bbox": [ + 331, + 139, + 537, + 150 + ], + "type": "text", + "content": "Figure 19. Team Z6: Network architecture of GloReNet." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 321, + 172, + 553, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 172, + 553, + 232 + ], + "spans": [ + { + "bbox": [ + 321, + 172, + 553, + 232 + ], + "type": "text", + "content": "the Adam optimizer to minimize the L1 loss, starting with an initial learning rate of " + }, + { + "bbox": [ + 321, + 172, + 553, + 232 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 321, + 172, + 553, + 232 + ], + "type": "text", + "content": " that decays via Multi-StepLR scheduler at the mid-training point. The training completes over 100 epochs, utilizing re-parameterization techniques throughout the process." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 232, + 553, + 507 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 314, + 232, + 553, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 232, + 553, + 316 + ], + "spans": [ + { + "bbox": [ + 314, + 232, + 553, + 316 + ], + "type": "text", + "content": "- Enhanced Resolution Training. Building upon the " + }, + { + "bbox": [ + 314, + 232, + 553, + 316 + ], + "type": "inline_equation", + "content": "\\times 2" + }, + { + "bbox": [ + 314, + 232, + 553, + 316 + ], + "type": "text", + "content": " pretrained weights, this phase increases the HR patch size to " + }, + { + "bbox": [ + 314, + 232, + 553, + 316 + ], + "type": "inline_equation", + "content": "128 \\times 128" + }, + { + "bbox": [ + 314, + 232, + 553, + 316 + ], + "type": "text", + "content": " while reducing the batch size to 16. All other hyperparameters (optimizer, learning rate schedule, and re-parameterization) remain consistent with Stage 1. The continued use of L1 loss maintains training stability during this resolution scaling phase." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 316, + 553, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 316, + 553, + 411 + ], + "spans": [ + { + "bbox": [ + 314, + 316, + 553, + 411 + ], + "type": "text", + "content": "- Convolutional Architecture Refinement. They implement standard " + }, + { + "bbox": [ + 314, + 316, + 553, + 411 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 314, + 316, + 553, + 411 + ], + "type": "text", + "content": " convolutional layers in this optimization stage, replacing previous architectural components. The training objective shifts to L2 loss minimization for fine-tuning, while preserving the fundamental network structure and parameter initialization from earlier stages. This transition enhances edge preservation in super-resolved outputs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 411, + 553, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 411, + 553, + 507 + ], + "spans": [ + { + "bbox": [ + 314, + 411, + 553, + 507 + ], + "type": "text", + "content": "- Tensor Decomposition Optimization. The final refinement employs tensor decomposition techniques with dual loss supervision " + }, + { + "bbox": [ + 314, + 411, + 553, + 507 + ], + "type": "inline_equation", + "content": "(\\mathrm{L1} + \\mathrm{L2})" + }, + { + "bbox": [ + 314, + 411, + 553, + 507 + ], + "type": "text", + "content": ". Training progresses with " + }, + { + "bbox": [ + 314, + 411, + 553, + 507 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 314, + 411, + 553, + 507 + ], + "type": "text", + "content": " HR patches using a reduced batch size of 16 and lower initial learning rate " + }, + { + "bbox": [ + 314, + 411, + 553, + 507 + ], + "type": "inline_equation", + "content": "(1 \\times 10^{-5})" + }, + { + "bbox": [ + 314, + 411, + 553, + 507 + ], + "type": "text", + "content": ". 
They implement cosine annealing scheduling for smooth convergence, completing the multi-stage optimization process through L2-loss-focused fine-tuning.." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 516, + 355, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 516, + 355, + 526 + ], + "spans": [ + { + "bbox": [ + 314, + 516, + 355, + 526 + ], + "type": "text", + "content": "4.18.Z6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 533, + 553, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 533, + 553, + 616 + ], + "spans": [ + { + "bbox": [ + 313, + 533, + 553, + 616 + ], + "type": "text", + "content": "General Method Description. They introduce a lightweight and efficient image super-resolution (SR) network that leverages both global and local feature attention mechanisms to produce high-quality reconstructions. As depicted in Fig. 19, their network is divided into two main blocks named Global Feature Attention Block (GFAB) and Local Feature Attention Block (LFAB)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "text", + "content": "GFAB is designed to capture large-scale context and dependencies across the entire image. Enhances globally significant features, helping the model learn the global information from input images. And LFAB can focus on refining fine-grained details and spatially localized information. Emphasizes subtle textural elements and sharp edges that are critical for upscaling. 
GFAB utilizes the parameter-free attention module (SPAN [111]) and LFAB uses Effi" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 203 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 203 + ], + "type": "text", + "content": "cient Spatial Attention (ESA) [72] to selectively highlight essential features. And all convolution layers applied reparameterization block [127]. The network begins with a series of convolution layers to extract initial features, which then pass through GFAB units for global attention. Subsequently, the output is processed by LFAB units for local attention, and finally, a PixelShuffle layer upscales the features to the target resolution. By combining these two parts, their method effectively preserves global context and local details, achieving a balance between high-quality reconstruction and efficient low computation." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 204, + 294, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 204, + 294, + 312 + ], + "spans": [ + { + "bbox": [ + 55, + 204, + 294, + 312 + ], + "type": "text", + "content": "Training Description. Their training process employs a scratch training stage and a fine-tuning stage. In the first scratch training stage, they use DIV2K datasets for the training dataset. In the fine-tuning stage, they use DIV2K and the first 10K LSDIR datasets for the training dataset. All experiments are carried out in the same experimental environment. 
The training process is executed using RTX A6000 GPUs. They use the Pytorch 1.13 version for all training steps." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 315, + 294, + 493 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 55, + 315, + 293, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 315, + 293, + 386 + ], + "spans": [ + { + "bbox": [ + 55, + 315, + 293, + 386 + ], + "type": "text", + "content": "- Scratch train stage: In the first step, their model is trained from scratch. The LR patches were cropped from LR images with an 8 mini-batch of " + }, + { + "bbox": [ + 55, + 315, + 293, + 386 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 315, + 293, + 386 + ], + "type": "text", + "content": ". Adam optimizer is used with a learning rate of 0.0005 during scratch training. The cosine warm-up scheduler is used. The total number of epochs is set to 2000. They use the " + }, + { + "bbox": [ + 55, + 315, + 293, + 386 + ], + "type": "inline_equation", + "content": "l1" + }, + { + "bbox": [ + 55, + 315, + 293, + 386 + ], + "type": "text", + "content": " loss." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 387, + 294, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 387, + 294, + 493 + ], + "spans": [ + { + "bbox": [ + 55, + 387, + 294, + 493 + ], + "type": "text", + "content": "- Fine-tuning stage: In the second step, the model is initialized with the weights trained in the first step. To improve precision, they used the loss method " + }, + { + "bbox": [ + 55, + 387, + 294, + 493 + ], + "type": "inline_equation", + "content": "l2" + }, + { + "bbox": [ + 55, + 387, + 294, + 493 + ], + "type": "text", + "content": " loss. 
This stage improves the value of the peak signal-to-noise ratio (PSNR) by " + }, + { + "bbox": [ + 55, + 387, + 294, + 493 + ], + "type": "inline_equation", + "content": "0.05 \\sim 0.06" + }, + { + "bbox": [ + 55, + 387, + 294, + 493 + ], + "type": "text", + "content": " dB. In this step, The LR patches are cropped from LR images with 32 mini-batch " + }, + { + "bbox": [ + 55, + 387, + 294, + 493 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 55, + 387, + 294, + 493 + ], + "type": "text", + "content": " sizes. And the initial learning rate is set to 0.00005 and the Adam optimizer is used in conjunction with a cosine warm-up. The total epoch is set to 200 epochs." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 503, + 132, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 503, + 132, + 514 + ], + "spans": [ + { + "bbox": [ + 55, + 503, + 132, + 514 + ], + "type": "text", + "content": "4.19. TACO_SR" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 521, + 294, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 521, + 294, + 664 + ], + "spans": [ + { + "bbox": [ + 55, + 521, + 294, + 664 + ], + "type": "text", + "content": "General Method Description. The overall architecture of their network is showed in Fig. 20(a), inspired by SPAN [110] and PFDNLite [91]. Motivated by the design of the Conv3XC module in SPAN, they introduce two additional parallel branches with varying channel expansion ratios, resulting in a novel convolution module termed TenInOneConv, which fuses multiple convolution kernels into a single equivalent kernel to improve inference efficiency. Furthermore, to enhance the model's capability in capturing local texture and detail features, the LocalAttention module, inspired by PFDNLite is integrated, allowing the network to better focus on informative regions within feature maps." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 665, + 294, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 294, + 713 + ], + "type": "text", + "content": "TenInOneSR employs four TenInOneBlock modules. Each of these blocks (detailed in Fig. 20(b)) begins with a LocalAttention module, which enhancing the network's ability to capture fine details. Subsequently, each block ap" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 553, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 119 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 119 + ], + "type": "text", + "content": "plies three cascaded TenInOneConv layers, interleaved with the SiLU activation function, to perform hierarchical feature refinement. The block concludes with a residual connection, allowing better gradient flow." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 121, + 553, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 121, + 553, + 217 + ], + "spans": [ + { + "bbox": [ + 313, + 121, + 553, + 217 + ], + "type": "text", + "content": "Notably, the behavior of the TenInOneConv differs between the training and inference phases. During training (Fig. 20(d)), TenInOneConv operates in a multi-branch configuration. It introduces three parallel convolutional branches with different channel expansion ratios (gains set as 1, 2, and 3), along with an additional skip connection. This multi-scale feature extraction enables the network to better aggregate complementary spatial features." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 217, + 553, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 217, + 553, + 300 + ], + "spans": [ + { + "bbox": [ + 313, + 217, + 553, + 300 + ], + "type": "text", + "content": "In the inference stage (Fig. 
20(f)), for computational efficiency and faster runtime, these multiple convolution kernels are fused into a single equivalent convolution kernel. Specifically, the parallel branches and skip connection weights are mathematically combined to form one unified " + }, + { + "bbox": [ + 313, + 217, + 553, + 300 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 313, + 217, + 553, + 300 + ], + "type": "text", + "content": " convolutional kernel, significantly accelerating inference without compromising performance." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 314, + 553, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 314, + 553, + 529 + ], + "spans": [ + { + "bbox": [ + 313, + 314, + 553, + 529 + ], + "type": "text", + "content": "Training description. The proposed architecture is trained on two NVIDIA RTX Titan GPUs with a total of 48 GB memory. In the first training stage, the DIV2K dataset is augmented by a factor of " + }, + { + "bbox": [ + 313, + 314, + 553, + 529 + ], + "type": "inline_equation", + "content": "85 \\times" + }, + { + "bbox": [ + 313, + 314, + 553, + 529 + ], + "type": "text", + "content": " and registered into the LSDIR format, resulting in a large-scale training set containing 152,991 high-resolution RGB images. During this stage, training is conducted with 64 randomly cropped " + }, + { + "bbox": [ + 313, + 314, + 553, + 529 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 314, + 553, + 529 + ], + "type": "text", + "content": " patches per batch, using common augmentations such as random flipping and rotation. The model is optimized using the Adam optimizer with L1 loss for a total of 100,000 iterations. 
The learning rate is initialized at " + }, + { + "bbox": [ + 313, + 314, + 553, + 529 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 313, + 314, + 553, + 529 + ], + "type": "text", + "content": " and decayed by half every 20,000 iterations. In the second stage, they keep the training strategy and hyperparameters unchanged, except for increasing the input patch size to " + }, + { + "bbox": [ + 313, + 314, + 553, + 529 + ], + "type": "inline_equation", + "content": "384 \\times 384" + }, + { + "bbox": [ + 313, + 314, + 553, + 529 + ], + "type": "text", + "content": " and reducing the batch size to 32 to fit GPU memory. Then another 100,000 training iterations are conducted to further improve the model's performance on higher-resolution textures." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 540, + 387, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 540, + 387, + 551 + ], + "spans": [ + { + "bbox": [ + 313, + 540, + 387, + 551 + ], + "type": "text", + "content": "4.20.AIOT.AI" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 557, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 557, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 557, + 553, + 713 + ], + "type": "text", + "content": "Method. The overall architecture of their network is shown in Fig. 21(a), inspired by the previous leading methods SPAN[112] and ECBSR[138]. They propose an Efficient channel attention super-resolution network acting on space (ECASNet). Specifically, on the basis of SPAB from SPAN, they combine edge-oriented convolution block (ECB) and regularization module (GCT) to form a new reparameterized feature extraction module named enhanced attention and re-parameterization block(EARB), as shown in Fig. 21(b). In addition, unlike SPAN, they find that using channel attention after feature map concatenating can significantly improve performance. 
For the sake of lightweight design, they use an efficient channel attention" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 68, + 202, + 443 + ], + "blocks": [ + { + "bbox": [ + 55, + 68, + 202, + 443 + ], + "lines": [ + { + "bbox": [ + 55, + 68, + 202, + 443 + ], + "spans": [ + { + "bbox": [ + 55, + 68, + 202, + 443 + ], + "type": "image", + "image_path": "06848c39c978127dbf1a5777572509c2538e8cda239227794323ca26f32c9d74.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 175, + 449, + 435, + 462 + ], + "lines": [ + { + "bbox": [ + 175, + 449, + 435, + 462 + ], + "spans": [ + { + "bbox": [ + 175, + 449, + 435, + 462 + ], + "type": "text", + "content": "Figure 20. Team TACO_SR: The architecture of proposed TenInOneSR." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 211, + 68, + 361, + 280 + ], + "blocks": [ + { + "bbox": [ + 211, + 68, + 361, + 280 + ], + "lines": [ + { + "bbox": [ + 211, + 68, + 361, + 280 + ], + "spans": [ + { + "bbox": [ + 211, + 68, + 361, + 280 + ], + "type": "image", + "image_path": "5aeaf5ee1c5610a62b97273c69623209ee7d7db802eb52f815951d41d89ec85f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 376, + 68, + 553, + 252 + ], + "blocks": [ + { + "bbox": [ + 376, + 68, + 553, + 252 + ], + "lines": [ + { + "bbox": [ + 376, + 68, + 553, + 252 + ], + "spans": [ + { + "bbox": [ + 376, + 68, + 553, + 252 + ], + "type": "image", + "image_path": "b2dafe71e9e3063b6f4e6e0d7fe0c81118d087eca6c5fbfdf98428625d1d76de.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 211, + 280, + 361, + 443 + ], + "blocks": [ + { + "bbox": [ + 211, + 280, + 361, + 443 + ], + "lines": [ + { + "bbox": [ + 211, + 280, + 361, + 443 + ], + "spans": [ + { + "bbox": [ + 211, + 280, + 361, + 443 + ], + "type": "image", + "image_path": "acb7e256b36e27fbb9227c2c97f2747745796ad01d4a69839743b8c4c6ab22db.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 378, + 260, + 557, + 443 + ], + "blocks": [ + { + "bbox": [ + 378, + 260, + 557, + 443 + ], + "lines": [ + { + "bbox": [ + 378, + 260, + 557, + 443 + ], + "spans": [ + { + "bbox": [ + 378, + 260, + 557, + 443 + ], + "type": "image", + "image_path": "49df396ea37e71a78da20d8f92ed483037fdc7b9986cdfebb18dda7676b67431.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 482, + 295, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 55, + 482, + 295, + 506 + ], + "spans": [ + { + "bbox": [ + 55, + 482, + 295, + 506 + ], + "type": "text", + "content": "module, called the efficient channel attention module which acts on space(CAS), as shown in Fig. 21(c)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 506, + 296, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 506, + 296, + 639 + ], + "spans": [ + { + "bbox": [ + 55, + 506, + 296, + 639 + ], + "type": "text", + "content": "Training Detail. The datasets used for training include DIV2K and LSDIR. Imitating the previous method, the training process is divided into two stages. In the first stage, they randomly crop " + }, + { + "bbox": [ + 55, + 506, + 296, + 639 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 506, + 296, + 639 + ], + "type": "text", + "content": " HR image blocks from the ground truth image, batch is 16, and randomly flipped and rotated them. Using Adam optimizer, set " + }, + { + "bbox": [ + 55, + 506, + 296, + 639 + ], + "type": "inline_equation", + "content": "\\beta 1 = 0.9" + }, + { + "bbox": [ + 55, + 506, + 296, + 639 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 506, + 296, + 639 + ], + "type": "inline_equation", + "content": "\\beta 2 = 0.999" + }, + { + "bbox": [ + 55, + 506, + 296, + 639 + ], + "type": "text", + "content": ", and minimize L1 loss function. The initial learning rate is set to 5e-4, and the cosine learning rate attenuation strategy is adopted. Epoch is set to 200. In the second stage, they changed the loss function to L2, and other settings are the same as those in the first stage." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 647, + 122, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 647, + 122, + 659 + ], + "spans": [ + { + "bbox": [ + 55, + 647, + 122, + 659 + ], + "type": "text", + "content": "4.21.JNU620" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "type": "text", + "content": "General Method Description. They propose a reparameterized residual local feature network (RepRLFN) for efficient image super-resolution, which is influenced by existing studies such as RepRFN [19] and RLFN [55]. Fig. 22" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 482, + 555, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 482, + 555, + 506 + ], + "spans": [ + { + "bbox": [ + 313, + 482, + 555, + 506 + ], + "type": "text", + "content": "illustrates the overall architecture of RepRLFN, which has been extensively validated in previous studies." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 507, + 555, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 507, + 555, + 626 + ], + "spans": [ + { + "bbox": [ + 313, + 507, + 555, + 626 + ], + "type": "text", + "content": "They replace the RLFB in RLFN [55] with their reparameterized residual local feature block (RepRLFB). RepBlock is the main component of RepRLFB, which employs multiple parallel branch structures to extract the features of different receptive fields and modes to improve performance. At the same time, the structural re-parameterization technology is leveraged to decouple the training and inference phases to avoid the problem that computational complexity increases caused by the introduction of multi-branch." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 628, + 556, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 628, + 556, + 664 + ], + "spans": [ + { + "bbox": [ + 313, + 628, + 556, + 664 + ], + "type": "text", + "content": "Training Strategy. The proposed RepRLFN consists of 4 RepRLFBs, with the number of feature channels set to 48. The details of the training steps are as follows:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 665, + 556, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 556, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 556, + 714 + ], + "type": "text", + "content": "1. In the first stage, the model is pre-trained on DIV2K [4]. HR patches of size " + }, + { + "bbox": [ + 313, + 665, + 556, + 714 + ], + "type": "inline_equation", + "content": "480 \\times 480" + }, + { + "bbox": [ + 313, + 665, + 556, + 714 + ], + "type": "text", + "content": " are randomly cropped from HR images, and the mini-batch size is set to 32. 
The model is trained by minimizing the L1 loss function" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 70, + 532, + 168 + ], + "blocks": [ + { + "bbox": [ + 63, + 70, + 532, + 168 + ], + "lines": [ + { + "bbox": [ + 63, + 70, + 532, + 168 + ], + "spans": [ + { + "bbox": [ + 63, + 70, + 532, + 168 + ], + "type": "image", + "image_path": "cd71cb66b2605332b1a6f6ce4dc15f144a84d5fa912a642fd7c10063bb2be48b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 264, + 170, + 332, + 184 + ], + "lines": [ + { + "bbox": [ + 264, + 170, + 332, + 184 + ], + "spans": [ + { + "bbox": [ + 264, + 170, + 332, + 184 + ], + "type": "text", + "content": "(b) ECASNet" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 56, + 202, + 293, + 292 + ], + "blocks": [ + { + "bbox": [ + 56, + 202, + 293, + 292 + ], + "lines": [ + { + "bbox": [ + 56, + 202, + 293, + 292 + ], + "spans": [ + { + "bbox": [ + 56, + 202, + 293, + 292 + ], + "type": "image", + "image_path": "d5280ba3c0c422f04fb814ae78615d895235df2419f06168de54ab34712b08dd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 127, + 286, + 175, + 298 + ], + "lines": [ + { + "bbox": [ + 127, + 286, + 175, + 298 + ], + "spans": [ + { + "bbox": [ + 127, + 286, + 175, + 298 + ], + "type": "text", + "content": "(b) EARB" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 102, 
+ 323, + 221, + 375 + ], + "blocks": [ + { + "bbox": [ + 102, + 323, + 221, + 375 + ], + "lines": [ + { + "bbox": [ + 102, + 323, + 221, + 375 + ], + "spans": [ + { + "bbox": [ + 102, + 323, + 221, + 375 + ], + "type": "image", + "image_path": "f9533c63c96b88d6982d85f2095e9f195b9d8592e0275d4fb83d1ad4cc7289c3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 127, + 390, + 166, + 403 + ], + "lines": [ + { + "bbox": [ + 127, + 390, + 166, + 403 + ], + "spans": [ + { + "bbox": [ + 127, + 390, + 166, + 403 + ], + "type": "text", + "content": "(c) CAS" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 167, + 419, + 441, + 431 + ], + "lines": [ + { + "bbox": [ + 167, + 419, + 441, + 431 + ], + "spans": [ + { + "bbox": [ + 167, + 419, + 441, + 431 + ], + "type": "text", + "content": "Figure 21. Team AIOT.AI: Detailed architecture of the proposed ECASNet." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 306, + 204, + 555, + 373 + ], + "blocks": [ + { + "bbox": [ + 306, + 204, + 555, + 373 + ], + "lines": [ + { + "bbox": [ + 306, + 204, + 555, + 373 + ], + "spans": [ + { + "bbox": [ + 306, + 204, + 555, + 373 + ], + "type": "image", + "image_path": "8bd2ef8051dea56cf5a345b62b7708f0cb0526c294db8e195419e3b299cee319.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 403, + 390, + 470, + 404 + ], + "lines": [ + { + "bbox": [ + 403, + 390, + 470, + 404 + ], + "spans": [ + { + "bbox": [ + 403, + 390, + 470, + 404 + ], + "type": "text", + "content": "(d) RepConv" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 452, + 295, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 452, + 295, + 487 + ], + "spans": [ + { + "bbox": [ + 55, + 452, + 295, + 487 + ], + "type": 
"text", + "content": "using the Adam optimizer. The initial learning rate is set to 5e-4 and is halved every 200 epochs. The total number of epochs is 800." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 491, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 55, + 491, + 295, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 491, + 295, + 586 + ], + "spans": [ + { + "bbox": [ + 55, + 491, + 295, + 586 + ], + "type": "text", + "content": "2. In the second stage, the model is fine-tuned on 3450 images from DIV2K [4] and Flickr2k [101] (DF2K) and the first 10k images from LSDIR [64]. HR patches of size " + }, + { + "bbox": [ + 55, + 491, + 295, + 586 + ], + "type": "inline_equation", + "content": "640 \\times 640" + }, + { + "bbox": [ + 55, + 491, + 295, + 586 + ], + "type": "text", + "content": " are randomly cropped from HR images, and the mini-batch size is set to 32. The model is fine-tuned by minimizing the L2 loss function. The initial learning rate is set to 2e-4 and is halved every 5 epochs. The total number of epochs is 25." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 590, + 295, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 590, + 295, + 674 + ], + "spans": [ + { + "bbox": [ + 55, + 590, + 295, + 674 + ], + "type": "text", + "content": "3. In the third stage, the model is fine-tuned again on 3450 images from DF2K and the first 10k images from LSDIR [64]. The HR patch size and minibatch size are set to " + }, + { + "bbox": [ + 55, + 590, + 295, + 674 + ], + "type": "inline_equation", + "content": "640 \\times 640" + }, + { + "bbox": [ + 55, + 590, + 295, + 674 + ], + "type": "text", + "content": " and 32, respectively. The model is fine-tuned by minimizing the L2 loss function. The initial learning rate is set to 1e-4 and is halved every 5 epochs. The total number of epochs is 20." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "content": "4. In the fourth stage, the model is fine-tuned on 3450 images from DF2K and the first " + }, + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "inline_equation", + "content": "10\\mathrm{k}" + }, + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "content": " images from LSDIR [64]. The HR patch size and minibatch size are set" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 452, + 555, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 452, + 555, + 524 + ], + "spans": [ + { + "bbox": [ + 313, + 452, + 555, + 524 + ], + "type": "text", + "content": "to " + }, + { + "bbox": [ + 313, + 452, + 555, + 524 + ], + "type": "inline_equation", + "content": "640 \\times 640" + }, + { + "bbox": [ + 313, + 452, + 555, + 524 + ], + "type": "text", + "content": " and 32, respectively. The model is fine-tuned by minimizing the L2 loss function. The learning rate is set to 5e-5, and the total number of epochs is 10. To prevent over-fitting, the model ensemble via stochastic weight averaging [46] (SWA) is performed during the last 8 epochs to obtain the final model for testing." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 538, + 421, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 538, + 421, + 551 + ], + "spans": [ + { + "bbox": [ + 313, + 538, + 421, + 551 + ], + "type": "text", + "content": "4.22. 
LVGroup_HFUT" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 557, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 557, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 557, + 555, + 713 + ], + "type": "text", + "content": "General Method Description. The Swift Parameter-free Attention Network (SPAN) [112] introduces a novel parameter-free attention mechanism to address the tradeoff between performance and computational complexity, as shown in 23. SPAN employs symmetric activation functions (e.g., shifted Sigmoid) applied to convolutional layer outputs to generate attention maps without learnable parameters, enhancing high-contribution features while suppressing redundant information. Residual connections within each Swift Parameter-free Attention Block (SPAB) mitigate information loss and preserve low-level features. The lightweight architecture with cascaded SPABs achieves fast inference by avoiding parameter-heavy attention computa" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 71, + 549, + 373 + ], + "blocks": [ + { + "bbox": [ + 63, + 71, + 549, + 373 + ], + "lines": [ + { + "bbox": [ + 63, + 71, + 549, + 373 + ], + "spans": [ + { + "bbox": [ + 63, + 71, + 549, + 373 + ], + "type": "image", + "image_path": "dfc46883933c059577cb6e6eeaa96eebd866af9c6728c75dfb5d979abc1dad54.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 185, + 383, + 425, + 395 + ], + "lines": [ + { + "bbox": [ + 185, + 383, + 425, + 395 + ], + "spans": [ + { + "bbox": [ 
+ 185, + 383, + 425, + 395 + ], + "type": "text", + "content": "Figure 22. Team JUN620: The network architecture of RepRLFN" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 59, + 417, + 293, + 494 + ], + "blocks": [ + { + "bbox": [ + 59, + 417, + 293, + 494 + ], + "lines": [ + { + "bbox": [ + 59, + 417, + 293, + 494 + ], + "spans": [ + { + "bbox": [ + 59, + 417, + 293, + 494 + ], + "type": "image", + "image_path": "de492b7ced705d8f7a88d48385420eb72c09a6323ea94714fef518a990277b96.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 62, + 502, + 288, + 514 + ], + "lines": [ + { + "bbox": [ + 62, + 502, + 288, + 514 + ], + "spans": [ + { + "bbox": [ + 62, + 502, + 288, + 514 + ], + "type": "text", + "content": "Figure 23. LVGroup_HFUT: The overall framework of SPAN." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 574, + 295, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 574, + 295, + 598 + ], + "spans": [ + { + "bbox": [ + 55, + 574, + 295, + 598 + ], + "type": "text", + "content": "tions while maintaining reconstruction quality through hierarchical feature aggregation and pixel-shuffle upsampling." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 617, + 296, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 296, + 712 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 296, + 712 + ], + "type": "text", + "content": "Training Details. They trained the SPAN model [112] on a mixed dataset composed of DIV2K [104] and LSDIR [64], setting feature_channels to 48, where the crop size of images is " + }, + { + "bbox": [ + 55, + 617, + 296, + 712 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 617, + 296, + 712 + ], + "type": "text", + "content": ". 
They used the Adam optimizer with L1 loss, an initial learning rate of 5e-4, and trained for a total of 1000k iterations, halving the learning rate every 200k iterations. Training was completed using a single NVIDIA RTX 4090 GPU." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 316, + 414, + 553, + 449 + ], + "blocks": [ + { + "bbox": [ + 316, + 414, + 553, + 449 + ], + "lines": [ + { + "bbox": [ + 316, + 414, + 553, + 449 + ], + "spans": [ + { + "bbox": [ + 316, + 414, + 553, + 449 + ], + "type": "image", + "image_path": "f9f256e2bdc25f83ac89a801417e8634751d782b6c46300dbea253a6df644900.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 457, + 555, + 479 + ], + "lines": [ + { + "bbox": [ + 313, + 457, + 555, + 479 + ], + "spans": [ + { + "bbox": [ + 313, + 457, + 555, + 479 + ], + "type": "text", + "content": "Figure 24. Team YG: The Spatial-gate self-distillation network (SGSDN)." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 314, + 498, + 359, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 498, + 359, + 509 + ], + "spans": [ + { + "bbox": [ + 314, + 498, + 359, + 509 + ], + "type": "text", + "content": "4.23.YG" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 516, + 414, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 516, + 414, + 526 + ], + "spans": [ + { + "bbox": [ + 314, + 516, + 414, + 526 + ], + "type": "text", + "content": "4.23.1. Method Details." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 531, + 555, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 531, + 555, + 590 + ], + "spans": [ + { + "bbox": [ + 313, + 531, + 555, + 590 + ], + "type": "text", + "content": "The Primary idea of the proposed SGSDN is to explore nonlocal information in a SA-like manner while modeling local details for efficient image super-resolution. This section will start by introducing the overall architecture of SGSDN and then explain the SGM and ESD in detail." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 591, + 555, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 591, + 555, + 651 + ], + "spans": [ + { + "bbox": [ + 313, + 591, + 555, + 651 + ], + "type": "text", + "content": "Network Architecture The overall structure of the SGSDN is shown in Fig. 24. It consists of three stages: shallow feature extraction, deep feature extraction, and image reconstruction. 
First, they use a " + }, + { + "bbox": [ + 313, + 591, + 555, + 651 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 313, + 591, + 555, + 651 + ], + "type": "text", + "content": " convolutional layer to extract shallow features, which is expressed as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 387, + 658, + 553, + 671 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 387, + 658, + 553, + 671 + ], + "spans": [ + { + "bbox": [ + 387, + 658, + 553, + 671 + ], + "type": "interline_equation", + "content": "\\mathbf {I} _ {s} = F _ {\\text {C o n v 3} \\times 3} (\\mathbf {I} _ {L R}), \\tag {9}", + "image_path": "4c3c5b5fbca74d5852049084b3b57cd3a1d788d88fbe4b9e13984f18476f973b.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "type": "inline_equation", + "content": "F_{Conv3 \\times 3}" + }, + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "type": "text", + "content": " represents the shallow feature extraction module using a " + }, + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "type": "text", + "content": " convolutional layer. The obtained shallow feature is denoted as " + }, + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_s" + }, + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "type": "text", + "content": ". 
Subsequently, the extracted" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 94, + 71, + 258, + 164 + ], + "blocks": [ + { + "bbox": [ + 94, + 71, + 258, + 164 + ], + "lines": [ + { + "bbox": [ + 94, + 71, + 258, + 164 + ], + "spans": [ + { + "bbox": [ + 94, + 71, + 258, + 164 + ], + "type": "image", + "image_path": "cba831f90e681281e70d07836d242412ebf29f6e98714494d5a04829c493e39c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 175, + 295, + 208 + ], + "lines": [ + { + "bbox": [ + 55, + 175, + 295, + 208 + ], + "spans": [ + { + "bbox": [ + 55, + 175, + 295, + 208 + ], + "type": "text", + "content": "Figure 25. Team YG: The details of each component. (a) SGM: Spatial-gate modulation module; (b) ESD: Enhanced self-distillation module." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 229, + 296, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 229, + 296, + 264 + ], + "spans": [ + { + "bbox": [ + 55, + 229, + 296, + 264 + ], + "type": "text", + "content": "shallow features are fed to several stacked SGSDBs to produce deep representative features, This process can be expressed as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 99, + 272, + 295, + 286 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 272, + 295, + 286 + ], + "spans": [ + { + "bbox": [ + 99, + 272, + 295, + 286 + ], + "type": "interline_equation", + "content": "\\mathbf {I} _ {k} = F _ {S G S D B} ^ {k} \\left(\\mathbf {I} _ {k - 1}\\right), k = 1, \\dots , n, \\tag {10}", + "image_path": "24f47da16d2adcd8bc459c4799e0339a09ae21d1339ca0fdeda369a5c77fcfac.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 294, + 296, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 294, + 296, + 354 + ], + "spans": [ + { + "bbox": [ + 55, + 294, + 296, + 354 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 55, + 294, + 296, + 354 + ], + "type": "inline_equation", + "content": "F_{SGSDB}^{k}(\\cdot)" + }, + { + "bbox": [ + 55, + 294, + 296, + 354 + ], + "type": "text", + "content": " represents the " + }, + { + "bbox": [ + 55, + 294, + 296, + 354 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 55, + 294, + 296, + 354 + ], + "type": "text", + "content": "-th SGSDB, " + }, + { + "bbox": [ + 55, + 294, + 296, + 354 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{k-1}" + }, + { + "bbox": [ + 55, + 294, + 296, + 354 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 294, + 296, + 354 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_k" + }, + { + "bbox": [ + 55, + 294, + 296, + 
354 + ], + "type": "text", + "content": " denote the input and output features of the " + }, + { + "bbox": [ + 55, + 294, + 296, + 354 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 55, + 294, + 296, + 354 + ], + "type": "text", + "content": "-th SGSDB, respectively. Each SGSDB consists of three SGMs and an ESD. Given an input feature " + }, + { + "bbox": [ + 55, + 294, + 296, + 354 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_t" + }, + { + "bbox": [ + 55, + 294, + 296, + 354 + ], + "type": "text", + "content": ", the mapping process of SGSDB can be represented as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 127, + 362, + 294, + 420 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 362, + 294, + 420 + ], + "spans": [ + { + "bbox": [ + 127, + 362, + 294, + 420 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbf {I} _ {d _ {1}} = F _ {S G M} (\\mathbf {I} _ {t}), \\\\ \\mathbf {I} _ {d _ {2}} = F _ {S G M} (\\mathbf {I} _ {d _ {1}}), \\\\ \\mathbf {I} _ {d _ {3}} = F _ {S G M} \\left(\\mathbf {I} _ {d _ {2}}\\right) + \\mathbf {I} _ {t}, \\\\ \\mathbf {I} _ {o} = F _ {E S D} (\\mathbf {I} _ {d _ {3}}) + \\mathbf {I} _ {d _ {3}} \\\\ \\end{array}", + "image_path": "b0e310266127b1fd1ba2aeb6da985df30d147f9c916beb447053487d3ee36743.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 427, + 296, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 427, + 296, + 523 + ], + "spans": [ + { + "bbox": [ + 55, + 427, + 296, + 523 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 55, + 427, + 296, + 523 + ], + "type": "inline_equation", + "content": "F_{SGM}" + }, + { + "bbox": [ + 55, + 427, + 296, + 523 + ], + "type": "text", + "content": " represents the SGM, " + }, + { + "bbox": [ + 55, + 427, + 296, + 523 + ], + "type": "inline_equation", + "content": "F_{ESD}" + }, + { + "bbox": [ + 55, + 427, + 296, 
+ 523 + ], + "type": "text", + "content": " represents the ESD. After the deep feature extraction block, the representative features are processed by a " + }, + { + "bbox": [ + 55, + 427, + 296, + 523 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 427, + 296, + 523 + ], + "type": "text", + "content": " standard convolution layer and a pixel shuffle operation [94] to reconstruct the high-quality SR image. To take advantage of high-frequency information, they insert a long-distance residual connection before the image reconstruction module. The reconstruction stage is described as follows" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 78, + 532, + 295, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 532, + 295, + 544 + ], + "spans": [ + { + "bbox": [ + 78, + 532, + 295, + 544 + ], + "type": "interline_equation", + "content": "\\mathbf {I} _ {S R} = F _ {\\text {P i x e l S h u f f l e}} \\left(F _ {\\text {C o n v 3} \\times 3} \\left(\\mathbf {I} _ {d} + \\mathbf {I} _ {s}\\right)\\right), \\tag {12}", + "image_path": "315b1535dd1f00ca6ee3afb2d0d04420e97751c2f72c52d889169d1533ff716f.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 552, + 295, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 552, + 295, + 601 + ], + "spans": [ + { + "bbox": [ + 55, + 552, + 295, + 601 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 552, + 295, + 601 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_d" + }, + { + "bbox": [ + 55, + 552, + 295, + 601 + ], + "type": "text", + "content": " denotes the deep feature obtained by the stacked SGSDBs, and " + }, + { + "bbox": [ + 55, + 552, + 295, + 601 + ], + "type": "inline_equation", + "content": "F_{Conv3\\times 3}(\\cdot)" + }, + { + "bbox": [ + 55, + 552, + 295, + 601 + ], + "type": "text", + "content": " indicates the " + }, + { + "bbox": [ + 55, + 552, + 295, + 
601 + ], + "type": "inline_equation", + "content": "3\\times 3" + }, + { + "bbox": [ + 55, + 552, + 295, + 601 + ], + "type": "text", + "content": " standard convolution layer. " + }, + { + "bbox": [ + 55, + 552, + 295, + 601 + ], + "type": "inline_equation", + "content": "F_{PixelShuffle}(\\cdot)" + }, + { + "bbox": [ + 55, + 552, + 295, + 601 + ], + "type": "text", + "content": " is used to upscale the final feature and output the SR reconstructed image " + }, + { + "bbox": [ + 55, + 552, + 295, + 601 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{SR}" + }, + { + "bbox": [ + 55, + 552, + 295, + 601 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 601, + 296, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 601, + 296, + 648 + ], + "spans": [ + { + "bbox": [ + 55, + 601, + 296, + 648 + ], + "type": "text", + "content": "Finally, to train the network, they use the " + }, + { + "bbox": [ + 55, + 601, + 296, + 648 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 55, + 601, + 296, + 648 + ], + "type": "text", + "content": " loss function to minimize the pixel-level difference between the ground truth image " + }, + { + "bbox": [ + 55, + 601, + 296, + 648 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{GT}" + }, + { + "bbox": [ + 55, + 601, + 296, + 648 + ], + "type": "text", + "content": " and the reconstructed image " + }, + { + "bbox": [ + 55, + 601, + 296, + 648 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{SR}" + }, + { + "bbox": [ + 55, + 601, + 296, + 648 + ], + "type": "text", + "content": ", which can be expressed as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 656, + 295, + 670 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 656, + 295, + 670 + ], + "spans": [ + { + "bbox": [ + 130, + 656, + 295, + 670 + ], + "type": "interline_equation", + "content": 
"L _ {1} = \\left\\| \\mathbf {I} _ {S R} - \\mathbf {I} _ {G T} \\right\\| _ {1}, \\tag {13}", + "image_path": "bdc0342fa5b6993dbb6a400dc2c09954c71f33a178c369230a05d096b5bac363.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": "At the same time, they notice that only using the pixelwise loss function can not effectively generate more high-frequency details [15]. Thus, they accordingly employ a" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 72, + 553, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 96 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 96 + ], + "type": "text", + "content": "frequency constraint to regularize network training. The adopted loss function for the network training is defined as" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 359, + 102, + 553, + 114 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 359, + 102, + 553, + 114 + ], + "spans": [ + { + "bbox": [ + 359, + 102, + 553, + 114 + ], + "type": "interline_equation", + "content": "L = L _ {1} + \\lambda \\| \\mathcal {F} (\\mathbf {I} _ {S R}) - \\mathcal {F} (\\mathbf {I} _ {G T}) \\|. 
\\tag {14}", + "image_path": "fa4aa7709eb7b2896ba4b086f4d265ee0a67592ca4ec36cd0bec84ce22f39b53.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 121, + 553, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 121, + 553, + 144 + ], + "spans": [ + { + "bbox": [ + 313, + 121, + 553, + 144 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 121, + 553, + 144 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 313, + 121, + 553, + 144 + ], + "type": "text", + "content": " represents the Fast Fourier Transform, and " + }, + { + "bbox": [ + 313, + 121, + 553, + 144 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 313, + 121, + 553, + 144 + ], + "type": "text", + "content": " is a weight parameter which is empirically set to 0.1." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 144, + 555, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 144, + 555, + 299 + ], + "spans": [ + { + "bbox": [ + 313, + 144, + 555, + 299 + ], + "type": "text", + "content": "Spatial-gate modulation module Considering that the reason why the ViT-based model performs well is that SA explores non-local information and expands the effective receptive field of the model. They develop a lightweight spatial-gate modulation (SGM) module to collaboratively extract representative features, where the SAL branch exploits non-local features in a larger receptive field by integrating the dilated depth-wise convolutional layers with horizontal and vertical 1-D kernels, and the LKG branch captures local features in parallel. Moreover, to avoid potential block artifacts aroused by dilation, they adopt the gate mechanism to recalibrate the generated feature maps adaptively, as shown in Fig. 25." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 299, + 554, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 299, + 554, + 359 + ], + "spans": [ + { + "bbox": [ + 313, + 299, + 554, + 359 + ], + "type": "text", + "content": "Given the input feature " + }, + { + "bbox": [ + 313, + 299, + 554, + 359 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{in} \\in R^{C \\times H \\times W}" + }, + { + "bbox": [ + 313, + 299, + 554, + 359 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 299, + 554, + 359 + ], + "type": "inline_equation", + "content": "H \\times W" + }, + { + "bbox": [ + 313, + 299, + 554, + 359 + ], + "type": "text", + "content": " denotes the spatial size and " + }, + { + "bbox": [ + 313, + 299, + 554, + 359 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 313, + 299, + 554, + 359 + ], + "type": "text", + "content": " is the number of channels. Specifically, they first apply a normalization layer and a point-by-point convolution to normalize information and expand the channel." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 370, + 365, + 553, + 378 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 365, + 553, + 378 + ], + "spans": [ + { + "bbox": [ + 370, + 365, + 553, + 378 + ], + "type": "interline_equation", + "content": "\\mathbf {I} _ {1} = F _ {\\text {C o n v 1} \\times 1} \\left(F _ {\\text {N o r m}} \\left(\\mathbf {I} _ {\\text {i n}}\\right)\\right), \\tag {15}", + "image_path": "6401cbf20b4191d272941b585c8d4f0adc6a86032cd4277e954bb6298446d34b.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "spans": [ + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "type": "inline_equation", + "content": "F_{Norm}" + }, + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "type": "text", + "content": " represents the " + }, + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "type": "inline_equation", + "content": "L_2" + }, + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "type": "text", + "content": " normalization and " + }, + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "type": "inline_equation", + "content": "F_{Conv1\\times 1}" + }, + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "type": "text", + "content": " denotes a " + }, + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "type": "inline_equation", + "content": "1\\times 1" + }, + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "type": "text", + "content": " convolutional layer, " + }, + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_1\\in R^{2C\\times H\\times W}" + }, + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "type": "text", + "content": ". 
Subsequently, the obtained features " + }, + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_1" + }, + { + "bbox": [ + 313, + 383, + 554, + 443 + ], + "type": "text", + "content": " are split into two parts along the channel dimension, this process can be expressed as:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 388, + 449, + 553, + 462 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 449, + 553, + 462 + ], + "spans": [ + { + "bbox": [ + 388, + 449, + 553, + 462 + ], + "type": "interline_equation", + "content": "\\mathbf {I} _ {x}, \\mathbf {I} _ {y} = F _ {S} \\left(F _ {G} \\left(\\mathbf {I} _ {1}\\right)\\right), \\tag {16}", + "image_path": "de74ce1b8f0633eda8ca59b948c9f77773cbcc804b10a76ea45571ce864aee05.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "spans": [ + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "inline_equation", + "content": "F_{G}" + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "text", + "content": " denotes the GELU activation function [38], " + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "inline_equation", + "content": "F_{S}" + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "text", + "content": " denotes a channel splitting operation, " + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_x \\in R^{C \\times H \\times W}" + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_y \\in R^{C \\times H \\times W}" + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], 
+ "type": "text", + "content": ". They then process the features " + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_x" + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_y" + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "text", + "content": " in parallel via the SAL and LKG branches, producing the non-local feature " + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_n" + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "text", + "content": " and local feature " + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_l" + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "text", + "content": ", respectively. It is worth mentioning that the SAL and LKG branches only need to be responsible for half the input signals, and the parallel processing is faster. Finally, they fuse the non-local feature " + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_n" + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "text", + "content": " and local feature " + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_l" + }, + { + "bbox": [ + 313, + 467, + 554, + 598 + ], + "type": "text", + "content": " together with channel concatenation to form a representative output of the SGM module. 
This process can be expressed as," + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 390, + 604, + 553, + 617 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 390, + 604, + 553, + 617 + ], + "spans": [ + { + "bbox": [ + 390, + 604, + 553, + 617 + ], + "type": "interline_equation", + "content": "\\mathbf {I} _ {S G M} = F _ {C} \\left(\\mathbf {I} _ {n}, \\mathbf {I} _ {l}\\right), \\tag {17}", + "image_path": "629c7ce13a1f3e7c8602c026f3e125d7a476ca88f8ecc455afd74783f7e65b16.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 623, + 553, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 623, + 553, + 646 + ], + "spans": [ + { + "bbox": [ + 313, + 623, + 553, + 646 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 313, + 623, + 553, + 646 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{DSG}" + }, + { + "bbox": [ + 313, + 623, + 553, + 646 + ], + "type": "text", + "content": " is the output feature and " + }, + { + "bbox": [ + 313, + 623, + 553, + 646 + ], + "type": "inline_equation", + "content": "F_{C}(\\cdot)" + }, + { + "bbox": [ + 313, + 623, + 553, + 646 + ], + "type": "text", + "content": " is the channel cascade operation." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 647, + 553, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 647, + 553, + 681 + ], + "spans": [ + { + "bbox": [ + 313, + 647, + 553, + 681 + ], + "type": "text", + "content": "SA-like branch They exploit non-local features in a larger receptive field by integrating the dilated depth-wise convolutional layers with horizontal and vertical 1-D kernels." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 351, + 688, + 553, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 351, + 688, + 553, + 715 + ], + "spans": [ + { + "bbox": [ + 351, + 688, + 553, + 715 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbf {I} _ {o} = F _ {D ^ {3} W C o n v 5 \\times 1 1} \\left(F _ {D W C o n v 5 \\times 1} \\right. \\tag {18} \\\\ \\left(F _ {D ^ {3} W C o n v 1 \\times 1 1} \\left(F _ {D W C o n v 1 \\times 5} (\\mathbf {I} _ {m})\\right)\\right)) \\\\ \\end{array}", + "image_path": "7b0c3a3313a71ca32c35419030fc37f77e748481cabac6389750f2cfefd13a40.jpg" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "inline_equation", + "content": "F_{DWConv1 \\times 5}(\\cdot)" + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "text", + "content": " denotes the DWConv layer with a kernel of size " + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "inline_equation", + "content": "1 \\times 5" + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "inline_equation", + "content": "F_{D^3 WConv1 \\times 11}(\\cdot)" + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "text", + "content": " 
signifies the DWConv layer with a kernel of size " + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "inline_equation", + "content": "1 \\times 11" + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "text", + "content": " and the dilated factor is set to 3, " + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "inline_equation", + "content": "F_{DWConv5 \\times 1}(\\cdot)" + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "text", + "content": " denotes the DWConv layer with a kernel of size " + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "inline_equation", + "content": "5 \\times 1" + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "inline_equation", + "content": "F_{D^3 WConv11 \\times 1}(\\cdot)" + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "text", + "content": " signifies the DWConv layer with a kernel of size " + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "inline_equation", + "content": "11 \\times 1" + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "text", + "content": " and the dilated factor is set to 3. Given that increasing the convolution kernel directly will greatly increase the parameter and computation amount, as well as increase the inference time of the model, whereas utilizing the dilated depth-wise convolutional layers with horizontal and vertical 1-D kernels will alleviate the problem. In this way, the information extraction capability of the convolutional layer is further enhanced without greatly increasing the number of computations. Moreover, to avoid potential block artifacts arising from dilation, they adopt the gate mechanism to recalibrate the generated feature maps adaptively. 
Finally, they use a " + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "text", + "content": " convolution to distill the output feature for extracting the representative structure information " + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_n" + }, + { + "bbox": [ + 55, + 72, + 296, + 301 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 124, + 309, + 295, + 323 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 309, + 295, + 323 + ], + "spans": [ + { + "bbox": [ + 124, + 309, + 295, + 323 + ], + "type": "interline_equation", + "content": "\\mathbf {I} _ {n} = F _ {\\text {C o n v 1} \\times 1} \\left(\\mathbf {I} _ {o} * \\mathbf {I} _ {y}\\right) \\tag {19}", + "image_path": "fb65ecdd10ff6c4921517e12c627c869473455c9ffd3e49a76b9808f00709b64.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 332, + 280, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 332, + 280, + 343 + ], + "spans": [ + { + "bbox": [ + 55, + 332, + 280, + 343 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 332, + 280, + 343 + ], + "type": "inline_equation", + "content": "*" + }, + { + "bbox": [ + 55, + 332, + 280, + 343 + ], + "type": "text", + "content": " represents the element-wise product operation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 344, + 296, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 344, + 296, + 475 + ], + "spans": [ + { + "bbox": [ + 55, + 344, + 296, + 475 + ], + "type": "text", + "content": "Local spatial-gate branch Local details are important for the pleasing high-frequency reconstruction. 
As the SAL branch prioritizes non-local structure information exploration, they develop a simple local spatial-gate branch to capture local features simultaneously. In detail, a " + }, + { + "bbox": [ + 55, + 344, + 296, + 475 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 344, + 296, + 475 + ], + "type": "text", + "content": " depth-wise convolution is used to encode local information from the input features " + }, + { + "bbox": [ + 55, + 344, + 296, + 475 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_x" + }, + { + "bbox": [ + 55, + 344, + 296, + 475 + ], + "type": "text", + "content": ". Then, they use the gate mechanism to generate the enhanced local feature. Finally, they use a " + }, + { + "bbox": [ + 55, + 344, + 296, + 475 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 55, + 344, + 296, + 475 + ], + "type": "text", + "content": " convolution with a GELU activation to distill the output features for extracting the representative detail information " + }, + { + "bbox": [ + 55, + 344, + 296, + 475 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_l" + }, + { + "bbox": [ + 55, + 344, + 296, + 475 + ], + "type": "text", + "content": ", which is achieved by," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 484, + 294, + 503 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 484, + 294, + 503 + ], + "spans": [ + { + "bbox": [ + 115, + 484, + 294, + 503 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbf {I} _ {o} = F _ {D W C o n v 3 \\times 3} (\\mathbf {I} _ {x}) * \\mathbf {I} _ {y}, \\\\ \\mathbf {I} _ {o} = F _ {D W C o n v 3 \\times 3} (\\mathbf {I} _ {x}). 
\\end{array} \\tag {20}", + "image_path": "c451da8a344672c77b071621cc948448afa145e2ceeb268b5c838603f9423ea7.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 117, + 499, + 219, + 512 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 499, + 219, + 512 + ], + "spans": [ + { + "bbox": [ + 117, + 499, + 219, + 512 + ], + "type": "interline_equation", + "content": "\\mathbf {I} _ {l} = F _ {G} \\left(F _ {\\text {C o n v 1} \\times 1} \\left(\\mathbf {I} _ {o}\\right)\\right)", + "image_path": "52d45502cb2dd3aee6acddbf1a133f0ba433b74e9d56e8aac335b471219a8fa6.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 521, + 295, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 521, + 295, + 556 + ], + "spans": [ + { + "bbox": [ + 55, + 521, + 295, + 556 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 521, + 295, + 556 + ], + "type": "inline_equation", + "content": "F_{DWConv3 \\times 3}(\\cdot)" + }, + { + "bbox": [ + 55, + 521, + 295, + 556 + ], + "type": "text", + "content": " denotes the DWConv layer with a kernel of size " + }, + { + "bbox": [ + 55, + 521, + 295, + 556 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 521, + 295, + 556 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 521, + 295, + 556 + ], + "type": "inline_equation", + "content": "F_{G}" + }, + { + "bbox": [ + 55, + 521, + 295, + 556 + ], + "type": "text", + "content": " represents GELU activation function." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 558, + 296, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 558, + 296, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 558, + 296, + 689 + ], + "type": "text", + "content": "Enhanced self-distillation module They present an enhanced self-distillation (ESD) module to expand and refine the features derived from the SGM in spatial and channel dimensions further. The ESD uses a " + }, + { + "bbox": [ + 55, + 558, + 296, + 689 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 558, + 296, + 689 + ], + "type": "text", + "content": " depth-wise convolutional to expand spatial and channel information. Then they use the GLUE activation function to introduce nonlinearity and extend the representation of the network. Finally, the output features are fed into a " + }, + { + "bbox": [ + 55, + 558, + 296, + 689 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 55, + 558, + 296, + 689 + ], + "type": "text", + "content": " convolution for further feature mixing and reducing the hidden channel back to the original input dimension. 
Given the input feature " + }, + { + "bbox": [ + 55, + 558, + 296, + 689 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{in} \\in R^{C \\times H \\times W}" + }, + { + "bbox": [ + 55, + 558, + 296, + 689 + ], + "type": "text", + "content": ", this process can be formulated as," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 90, + 700, + 295, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 700, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 90, + 700, + 295, + 715 + ], + "type": "interline_equation", + "content": "\\mathbf {I} _ {l} = F _ {\\text {C o n v 1} \\times 1} \\left(F _ {G} \\left(F _ {\\text {D W C o n v 3} \\times 3} \\left(\\mathbf {I} _ {i n}\\right)\\right)\\right) \\tag {21}", + "image_path": "fb61cf2bd326fc5f88d5a3f8a510a81318ceffc6d1176cced4db306d4281519c.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": "Training Details. Following previous works [66], they use the DF2K dataset, which consists of 800 images from DIV2K [4] and 2650 images from Flickr2K [70] as the training dataset. A sliding window slicing operation is used to decompose each HR image into " + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "inline_equation", + "content": "480 \\times 480" + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": " patches for training. The LR images are obtained by downsampling the HR images using the MATLAB bicubic kernel function." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "spans": [ + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "text", + "content": "During the training, random rotation and horizontal flipping are used for data augmentation. The proposed SGSDN has 8 SGSDBs, in which the number of feature channels is set to 24. They start by pretraining the model on the DIV2K and Flickr2K datasets. The mini-batch size is set to 64. The model is trained by the ADAN optimizer [124] with " + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.98" + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.92" + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "inline_equation", + "content": "\\beta_{3} = 0.99" + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "text", + "content": ", and the exponential moving average (EMA) is set to 0.999 to stabilize training. The initial and minimum learning rates are set to " + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-3}" + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "text", + "content": ", respectively, and decay according to cosine learning rate. 
The model is optimized using a combination of the " + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "text", + "content": " loss and an FFT-based frequency loss function [15] for a total of " + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "inline_equation", + "content": "1 \\times 10^{6}" + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "text", + "content": " iterations. The size of the randomly cropped LR patches is " + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 313, + 157, + 554, + 324 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 325, + 554, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 325, + 554, + 421 + ], + "spans": [ + { + "bbox": [ + 313, + 325, + 554, + 421 + ], + "type": "text", + "content": "They then conduct fine-tuning on the DIV2K dataset and the first 10k images from LSDIR [64]. The input size is set to " + }, + { + "bbox": [ + 313, + 325, + 554, + 421 + ], + "type": "inline_equation", + "content": "96 \\times 96" + }, + { + "bbox": [ + 313, + 325, + 554, + 421 + ], + "type": "text", + "content": ", with a batch size of 32. The fine-tuning process optimizes the model by starting with an initial learning rate of " + }, + { + "bbox": [ + 313, + 325, + 554, + 421 + ], + "type": "inline_equation", + "content": "3 \\times 10^{-3}" + }, + { + "bbox": [ + 313, + 325, + 554, + 421 + ], + "type": "text", + "content": ", while keeping the rest consistent with pretraining. The fine-tuning phase encompasses a total of 100k iterations. They implemented our model on an NVIDIA RTX 3090 GPU using Pytorch." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 430, + 382, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 430, + 382, + 441 + ], + "spans": [ + { + "bbox": [ + 314, + 430, + 382, + 441 + ], + "type": "text", + "content": "4.24. NanoSR" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 449, + 554, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 449, + 554, + 568 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 554, + 568 + ], + "type": "text", + "content": "Network Architecture. Their network architecture is inspired by SPAN [112] and PAN [142]. While maintaining the overall design of SPAN, they replace the SPAB block with the RepBlock. The RepBlock consists of a feature extractor using reparameterized convolution and a reparameterized pixel attention module. During training, the RepBlock operates in a complex mode to achieve better quality performance but can be equivalently transformed into a simple mode with fewer parameters and FLOPs. The detailed network architecture is illustrated in Fig. 26." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 569, + 554, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 569, + 554, + 665 + ], + "spans": [ + { + "bbox": [ + 313, + 569, + 554, + 665 + ], + "type": "text", + "content": "Reparameterized Convolution. Reparameterized convolution plays a crucial role in improving the performance of efficient CNN-based super-resolution networks. They employ the RepMBCov introduced in PlainUSR [120], and this RepMBCov forms all the convolutions in the RepBlock. In addition, RepMBCov is derived from MobileNetV3 [39] Block (MBConv). The architecture of RepMBCov is depicted in Fig. 27." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "type": "text", + "content": "Implementation Details. They train the model using all 85,791 image pairs from the DIV2K and LSDIR datasets. Each image pair is cropped into " + }, + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "type": "inline_equation", + "content": "480 \\times 480" + }, + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "type": "text", + "content": " sub-patches for training. During each training batch, 64 HR RGB patches" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 88, + 93, + 541, + 340 + ], + "blocks": [ + { + "bbox": [ + 88, + 93, + 541, + 340 + ], + "lines": [ + { + "bbox": [ + 88, + 93, + 541, + 340 + ], + "spans": [ + { + "bbox": [ + 88, + 93, + 541, + 340 + ], + "type": "image", + "image_path": "4b7b31467018d189fc76958a39af5df580c215fa4fa8a574934ee8b7d2b699dd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 186, + 354, + 425, + 365 + ], + "lines": [ + { + "bbox": [ + 186, + 354, + 425, + 365 + ], + "spans": [ + { + "bbox": [ + 186, + 354, + 425, + 365 + ], + "type": "text", + "content": "Figure 26. 
Team NanoSR: The network architecture of RepRLFN" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 101, + 401, + 251, + 585 + ], + "blocks": [ + { + "bbox": [ + 101, + 401, + 251, + 585 + ], + "lines": [ + { + "bbox": [ + 101, + 401, + 251, + 585 + ], + "spans": [ + { + "bbox": [ + 101, + 401, + 251, + 585 + ], + "type": "image", + "image_path": "f41c0d497ea15477ece5f1fc75a7c7d46d314cd74ff65e50caa4891ff1ad9ef1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 56, + 604, + 295, + 616 + ], + "lines": [ + { + "bbox": [ + 56, + 604, + 295, + 616 + ], + "spans": [ + { + "bbox": [ + 56, + 604, + 295, + 616 + ], + "type": "text", + "content": "Figure 27. Team NanoSR: The network architecture of RepRLFN" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "text", + "content": "of size " + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "inline_equation", + "content": "128 \\times 128" + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "text", + "content": " are randomly cropped and augmented with random flipping and rotation. 
The optimization objective is the " + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "inline_equation", + "content": "\\ell_1" + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "text", + "content": " loss, and they use the AdamW optimizer (" + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.99" + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "text", + "content": ") to train NanoSR. The learning rate is initialized at " + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "text", + "content": " and halved at " + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "inline_equation", + "content": "\\{250\\mathrm{k}, 400\\mathrm{k}, 450\\mathrm{k}, 475\\mathrm{k}\\}" + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "text", + "content": " iterations within a total of " + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "inline_equation", + "content": "500\\mathrm{k}" + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "text", + "content": " iterations. The proposed" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 386, + 555, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 386, + 555, + 410 + ], + "spans": [ + { + "bbox": [ + 313, + 386, + 555, + 410 + ], + "type": "text", + "content": "method is implemented using the PyTorch framework on a single NVIDIA RTX 4090 GPU." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 464, + 463, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 464, + 463, + 478 + ], + "spans": [ + { + "bbox": [ + 313, + 464, + 463, + 478 + ], + "type": "text", + "content": "4.25. MegastudyEdu_Vision.AI" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 498, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 555, + 715 + ], + "type": "text", + "content": "General Method Description. To effectively model long-range dependency and extensive receptive field, inspired by CFSR [122], they propose the multi-scale aggregation attention network (MAAN), as illustrated in Fig. 28. MAAN reconstructs high-quality images through a shallow feature extractor, a stack of three residual multi-scale aggregation blocks (RMAB) composed of multi-scale aggregation attention layers (MAAL), a large separable kernel attention tail (LSKAT), and an image reconstruction module. Specially, MAAL captures global and local details via a multi-scale mixer and efficient feed-forward network (EFN) [122]. Given a low-resolution input image " + }, + { + "bbox": [ + 313, + 498, + 555, + 715 + ], + "type": "inline_equation", + "content": "I_{LR} \\in \\mathbb{R}^{3 \\times H \\times W}" + }, + { + "bbox": [ + 313, + 498, + 555, + 715 + ], + "type": "text", + "content": ", shallow features such as edges, textures, and fine details are extracted using a " + }, + { + "bbox": [ + 313, + 498, + 555, + 715 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 313, + 498, + 555, + 715 + ], + "type": "text", + "content": " convolution in the shallow feature extraction stage and passed to the MAAL. As shown in Fig. 
28, the MAAL processing pipeline begins with an input " + }, + { + "bbox": [ + 313, + 498, + 555, + 715 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 313, + 498, + 555, + 715 + ], + "type": "text", + "content": ", applying layer normalization, followed by a " + }, + { + "bbox": [ + 313, + 498, + 555, + 715 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 313, + 498, + 555, + 715 + ], + "type": "text", + "content": " convolution and splitting the feature map into four groups" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 70, + 548, + 259 + ], + "blocks": [ + { + "bbox": [ + 60, + 70, + 548, + 259 + ], + "lines": [ + { + "bbox": [ + 60, + 70, + 548, + 259 + ], + "spans": [ + { + "bbox": [ + 60, + 70, + 548, + 259 + ], + "type": "image", + "image_path": "77c96b623860559eacc549d8e973b55a52ac782e82292d36ce71b6afab9761ca.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 124, + 266, + 485, + 277 + ], + "lines": [ + { + "bbox": [ + 124, + 266, + 485, + 277 + ], + "spans": [ + { + "bbox": [ + 124, + 266, + 485, + 277 + ], + "type": "text", + "content": "Figure 28. Team MegastudyEdu_Vision.AI: Overview of multi-scale aggregation attention network." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 299, + 176, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 299, + 176, + 310 + ], + "spans": [ + { + "bbox": [ + 55, + 299, + 176, + 310 + ], + "type": "text", + "content": "along the channel dimension:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 160, + 319, + 240, + 331 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 319, + 240, + 331 + ], + "spans": [ + { + "bbox": [ + 160, + 319, + 240, + 331 + ], + "type": "interline_equation", + "content": "V = \\operatorname {C o n v} _ {1 \\times 1} (X),", + "image_path": "084bf72c1241e932922747ff09975d59d3e31eba791d72306bc8b49497123eb2.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 146, + 334, + 294, + 352 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 334, + 294, + 352 + ], + "spans": [ + { + "bbox": [ + 146, + 334, + 294, + 352 + ], + "type": "interline_equation", + "content": "F _ {\\text {g a t e}} = \\operatorname {C o n v} _ {1 \\times 1} (X), \\tag {22}", + "image_path": "488b1bb587aaf16cfb309ec09954a5b493357a03ceefef34c9d2e6752fd2a5c6.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 63, + 349, + 272, + 376 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 349, + 272, + 376 + ], + "spans": [ + { + "bbox": [ + 63, + 349, + 272, + 376 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} F _ {i d}, F _ {g a t e 1}, F _ {g a t e 2}, F _ {g a t e 3} = \\operatorname {S p l i t} (F _ {g a t e}), \\\\ = F _ {: g}, F _ {g: 2 g}, F _ {2 g: 3 g}, F _ {3 g}. 
\\\\ \\end{array}", + "image_path": "35623c5612d71338e09941264b96e83f4fddcf408b0156ea332533ff5d0bbd24.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 383, + 296, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 383, + 296, + 456 + ], + "spans": [ + { + "bbox": [ + 55, + 383, + 296, + 456 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 55, + 383, + 296, + 456 + ], + "type": "inline_equation", + "content": "F_{id}" + }, + { + "bbox": [ + 55, + 383, + 296, + 456 + ], + "type": "text", + "content": " is the identity mapping without channel modification. The channel count used in convolution branches, denoted as " + }, + { + "bbox": [ + 55, + 383, + 296, + 456 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 55, + 383, + 296, + 456 + ], + "type": "text", + "content": ", is determined by a ratio " + }, + { + "bbox": [ + 55, + 383, + 296, + 456 + ], + "type": "inline_equation", + "content": "r_g" + }, + { + "bbox": [ + 55, + 383, + 296, + 456 + ], + "type": "text", + "content": ", computed as " + }, + { + "bbox": [ + 55, + 383, + 296, + 456 + ], + "type": "inline_equation", + "content": "g = r_g C" + }, + { + "bbox": [ + 55, + 383, + 296, + 456 + ], + "type": "text", + "content": ". They set " + }, + { + "bbox": [ + 55, + 383, + 296, + 456 + ], + "type": "inline_equation", + "content": "r_g" + }, + { + "bbox": [ + 55, + 383, + 296, + 456 + ], + "type": "text", + "content": " to 0.25. 
Subsequently, each branch is processed using large separable kernel (LSK), inspired by large separable kernel attention (LSKA) [57]:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 464, + 176, + 479 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 464, + 176, + 479 + ], + "spans": [ + { + "bbox": [ + 131, + 464, + 176, + 479 + ], + "type": "interline_equation", + "content": "F _ {i d} ^ {\\prime} = F _ {i d},", + "image_path": "ffa156e79bd432d8297ee8fb75d976bc2fd29b0015d9167843692ed720a5bacd.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 119, + 482, + 294, + 503 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 482, + 294, + 503 + ], + "spans": [ + { + "bbox": [ + 119, + 482, + 294, + 503 + ], + "type": "interline_equation", + "content": "\\begin{array}{c} F _ {g a t e 1} ^ {\\prime} = L S K _ {1 1, 2} \\left(F _ {g a t e 1}\\right), \\\\ \\end{array} \\tag {23}", + "image_path": "e3eb69eb5cd620b53c2d262f2784bb41f4fb5dc9b9c3f2636d12d2ad8c5064f9.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 119, + 499, + 232, + 514 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 499, + 232, + 514 + ], + "spans": [ + { + "bbox": [ + 119, + 499, + 232, + 514 + ], + "type": "interline_equation", + "content": "F _ {g a t e 2} ^ {\\prime} = L S K _ {2 3, 3} \\left(F _ {g a t e 2}\\right),", + "image_path": "ce83c12967ac731688262fcbc99d860413edf99aaafa7cd0270031e44871e97d.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 119, + 517, + 232, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 517, + 232, + 533 + ], + "spans": [ + { + "bbox": [ + 119, + 517, + 232, + 533 + ], + "type": "interline_equation", + "content": "F _ {g a t e 3} ^ {\\prime} = L S K _ {3 5, 3} \\left(F _ {g a t e 3}\\right),", + "image_path": "0bf0af3476ab1274fdca8718aeca55fcd93d0f9478a7cbb685702fba7d668c43.jpg" + 
} + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "spans": [ + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "inline_equation", + "content": "LSK_{k,d}" + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "text", + "content": " indicates the kernel size " + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "text", + "content": " and dilation factor " + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "text", + "content": ". Each LSK is composed of consecutive " + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "inline_equation", + "content": "1 \\times k" + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "text", + "content": " depth-wise convolution, " + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "inline_equation", + "content": "k \\times 1" + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "text", + "content": " depth-wise convolution, " + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "inline_equation", + "content": "1 \\times k" + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "text", + "content": " dilated depth-wise convolution, and " + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "inline_equation", + "content": "k \\times 1" + }, + { + "bbox": [ + 55, + 536, + 295, + 607 + ], + "type": "text", + "content": " dilated depth-wise convolution. The distinct kernel sizes and dilation factors across branches effectively handle multi-scale features." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 609, + 296, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 609, + 296, + 656 + ], + "spans": [ + { + "bbox": [ + 55, + 609, + 296, + 656 + ], + "type": "text", + "content": "After concatenating the outputs from each branch, the combined result is integrated with " + }, + { + "bbox": [ + 55, + 609, + 296, + 656 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 55, + 609, + 296, + 656 + ], + "type": "text", + "content": " through an element-wise product. Subsequently, " + }, + { + "bbox": [ + 55, + 609, + 296, + 656 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 55, + 609, + 296, + 656 + ], + "type": "text", + "content": " convolution is applied to obtain the final output as follows:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 663, + 296, + 689 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 663, + 296, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 663, + 296, + 689 + ], + "type": "interline_equation", + "content": "F _ {o u t} = \\operatorname {C o n v} _ {1 \\times 1} \\left(V \\odot \\operatorname {C o n c a t} \\left(F _ {i d} ^ {\\prime}, F _ {\\text {g a t e} 1} ^ {\\prime}, F _ {\\text {g a t e} 2} ^ {\\prime}, F _ {\\text {g a t e} 3} ^ {\\prime}\\right)\\right) \\tag {24}", + "image_path": "2b41b73c636b5b1a0ad078220052dac9269ef257e790e5d38e6d2a351163d0b6.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": "This " + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "inline_equation", + "content": "F_{out}" + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": " is then fed into EFN [122]. 
For further EFN details, refer to CFSR [122]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 298, + 553, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 298, + 553, + 370 + ], + "spans": [ + { + "bbox": [ + 313, + 298, + 553, + 370 + ], + "type": "text", + "content": "While CFSR [122] employs a " + }, + { + "bbox": [ + 313, + 298, + 553, + 370 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 313, + 298, + 553, + 370 + ], + "type": "text", + "content": " convolution tail for deep feature extraction, it has limitations in establishing long-range connections, restricting the representational capability of reconstructed features. To overcome this, they propose LSKAT inspired by the large kernel attention tail(LKAT) [119], as depicted in Fig. 28." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 371, + 555, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 371, + 555, + 514 + ], + "spans": [ + { + "bbox": [ + 313, + 371, + 555, + 514 + ], + "type": "text", + "content": "Training Details. Their approach leverages DIV2K[103], Flickr2K[70], and the first 10K portion of LSDIR[64]. In each RMAB, the number of channels, RMABs, and MAALs are set to 48, 3, and 2-3-2, respectively. During training, they used 256 HR RGB patches with a batch size of 64. Data augmentation included random flips and rotations. Parameters are optimized using the L1 loss and the Adam optimizer[54]. The learning rate started at " + }, + { + "bbox": [ + 313, + 371, + 555, + 514 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-3}" + }, + { + "bbox": [ + 313, + 371, + 555, + 514 + ], + "type": "text", + "content": " and decreasing to " + }, + { + "bbox": [ + 313, + 371, + 555, + 514 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 313, + 371, + 555, + 514 + ], + "type": "text", + "content": " using a cosine annealing scheduler. 
The network is trained for 1,000K iterations, implemented in PyTorch, and executed on an NVIDIA RTX 3090 GPU." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 526, + 372, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 526, + 372, + 537 + ], + "spans": [ + { + "bbox": [ + 314, + 526, + 372, + 537 + ], + "type": "text", + "content": "4.26.MILA" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 544, + 554, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 544, + 554, + 676 + ], + "spans": [ + { + "bbox": [ + 313, + 544, + 554, + 676 + ], + "type": "text", + "content": "General Method Description. As shown in Figure 29, inspired by the efficient approximation of self-attention (EASA) [144], they introduce local variance and design LVSA. Additionally, inspired by MDRN [81] and AGDN [114], they consider the impact of multi-level branches on performance. Therefore, they design a multi-level variance feature modulation block that incorporates non-local information with local variance perception at two different levels. This design aims to better leverage the interplay between local and non-local features while balancing performance and model complexity." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 677, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 554, + 714 + ], + "type": "text", + "content": "The gated-dconv feed-forward network (GDFN) [132] introduces gating mechanism and depth-wise convolutions to encode information from spatially adjacent pixel posi" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "type": "text", + "content": "tions, which is highly useful for learning local image structures to achieve effective restoration. However, the single gating structure is relatively simple and cannot effectively capture and blend local contextual information. Therefore, they propose the symmetric gated feed-forward network." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 140, + 295, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 140, + 295, + 176 + ], + "spans": [ + { + "bbox": [ + 55, + 140, + 295, + 176 + ], + "type": "text", + "content": "Training Description. The proposed MVFMNet has 6 FMMs, in which the number of feature channels is set to 26. 
The details of the training steps are as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 183, + 294, + 351 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 52, + 183, + 294, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 183, + 294, + 278 + ], + "spans": [ + { + "bbox": [ + 52, + 183, + 294, + 278 + ], + "type": "text", + "content": "1. Pretraining on the DF2K and the first 1k images of LSDIR datasets. HR patches of size " + }, + { + "bbox": [ + 52, + 183, + 294, + 278 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 52, + 183, + 294, + 278 + ], + "type": "text", + "content": " are randomly cropped from HR images, and the mini-batch size is set to 64. The model is trained by minimizing L1 loss and the frequency loss [14] with Adam optimizer for total 100k iterations. They set the initial learning rate to " + }, + { + "bbox": [ + 52, + 183, + 294, + 278 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-3}" + }, + { + "bbox": [ + 52, + 183, + 294, + 278 + ], + "type": "text", + "content": " and the minimum one to " + }, + { + "bbox": [ + 52, + 183, + 294, + 278 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 52, + 183, + 294, + 278 + ], + "type": "text", + "content": ", which is updated by the Cosine Annealing scheme [78]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 279, + 294, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 279, + 294, + 351 + ], + "spans": [ + { + "bbox": [ + 52, + 279, + 294, + 351 + ], + "type": "text", + "content": "2. Finetuning on the DF2K and the first 1k images of LSDIR datasets. 
HR patch size and mini-batch size are set to " + }, + { + "bbox": [ + 52, + 279, + 294, + 351 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 52, + 279, + 294, + 351 + ], + "type": "text", + "content": " and 64, respectively. The model is fine-tuned by minimizing the L2 loss function. The learning rate is initialized at " + }, + { + "bbox": [ + 52, + 279, + 294, + 351 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-5}" + }, + { + "bbox": [ + 52, + 279, + 294, + 351 + ], + "type": "text", + "content": " and gradually decreased to " + }, + { + "bbox": [ + 52, + 279, + 294, + 351 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-8}" + }, + { + "bbox": [ + 52, + 279, + 294, + 351 + ], + "type": "text", + "content": " over 500k iterations using the Cosine Annealing scheme." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 381, + 129, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 381, + 129, + 392 + ], + "spans": [ + { + "bbox": [ + 55, + 381, + 129, + 392 + ], + "type": "text", + "content": "4.27. AiMF_SR" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 406, + 296, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 406, + 296, + 562 + ], + "spans": [ + { + "bbox": [ + 55, + 406, + 296, + 562 + ], + "type": "text", + "content": "Method Details. They propose a novel Mixture of Efficient Attention (MoEA) architecture for efficient superresolution tasks. The architecture includes a shallow feature extractor, multiple Feature Representation Modules (FRMs), and an efficient reconstruction and upsampling module. 
Initially, a shallow " + }, + { + "bbox": [ + 55, + 406, + 296, + 562 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 406, + 296, + 562 + ], + "type": "text", + "content": " convolutional layer reduces computational load, generating compact feature representations. Deep feature extraction employs transformer-inspired blocks with pre-normalization, incorporating Mixture-of-Experts (MoE) Blocks [131] for efficient attention and Depth Feed Forward Networks (DepthFFN) for capturing depth-wise interactions. Details of the architecture can be seen in Fig. 30." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "text", + "content": "The MoEBlock consists of two parallel feature pathways (Fig. 30). The input features " + }, + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "text", + "content": " are first projected into two distinct feature sets " + }, + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "inline_equation", + "content": "x_{a}" + }, + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "inline_equation", + "content": "x_{b}" + }, + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "text", + "content": " using a pointwise convolution. The first branch, " + }, + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "inline_equation", + "content": "x_{a}" + }, + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "text", + "content": ", undergoes both adaptive average and max pooling followed by depth-wise convolutions. The pooling is done in scale of 8 [145]. 
These pooling layers followed by depth-wise convolutions serve as efficient attention-like mechanism. Then, it combines these features through element-wise addition, nonlinear activation (GELU), and interpolation. The second branch, " + }, + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "inline_equation", + "content": "x_{b}" + }, + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "text", + "content": ", is processed via depth-wise and pointwise convolutions with GELU activation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 315, + 101, + 558, + 184 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 101, + 558, + 184 + ], + "spans": [ + { + "bbox": [ + 315, + 101, + 558, + 184 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} x _ {a} = \\operatorname {D W C o n v} \\left(\\operatorname {A v g P o o l} \\left(x _ {a}\\right)\\right) + \\operatorname {D W C o n v} \\left(\\operatorname {M a x P o o l} \\left(x _ {a}\\right)\\right), \\\\ x _ {a} ^ {\\prime} = \\mathcal {U} (\\mathcal {G} (\\operatorname {P W C o n v} (x _ {a}))), \\\\ x _ {a} ^ {\\prime} = \\operatorname {P W C o n v} \\left(x _ {a} ^ {\\prime}\\right), \\\\ x _ {b} ^ {\\prime} = \\mathcal {G} (\\operatorname {P W C o n v} (\\operatorname {D W C o n v} (x _ {b}))), \\\\ x _ {a b} = \\mathcal {C} \\left(x _ {a} ^ {\\prime}, x _ {b} ^ {\\prime}\\right). 
\\tag {25} \\\\ \\end{array}", + "image_path": "60242a8eb145e6d82d6a9916b55caa48e484b5c2f5637f411347ab1d8bbf239f.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 188, + 553, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 188, + 553, + 272 + ], + "spans": [ + { + "bbox": [ + 313, + 188, + 553, + 272 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 188, + 553, + 272 + ], + "type": "inline_equation", + "content": "x_{a}, x_{b}" + }, + { + "bbox": [ + 313, + 188, + 553, + 272 + ], + "type": "text", + "content": " are concatenated and passed through the Router (gating network), " + }, + { + "bbox": [ + 313, + 188, + 553, + 272 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 313, + 188, + 553, + 272 + ], + "type": "text", + "content": ", which adaptively selects the top- " + }, + { + "bbox": [ + 313, + 188, + 553, + 272 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 188, + 553, + 272 + ], + "type": "text", + "content": " expert paths based on the channel-wise global average-pooled features in the MoE-layer. 
Each selected expert independently processes " + }, + { + "bbox": [ + 313, + 188, + 553, + 272 + ], + "type": "inline_equation", + "content": "x_{a}'" + }, + { + "bbox": [ + 313, + 188, + 553, + 272 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 188, + 553, + 272 + ], + "type": "inline_equation", + "content": "x_{b}'" + }, + { + "bbox": [ + 313, + 188, + 553, + 272 + ], + "type": "text", + "content": " through pointwise convolutions, multiplies them element-wise, and applies a final convolution for feature integration:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 316, + 305, + 553, + 357 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 305, + 553, + 357 + ], + "spans": [ + { + "bbox": [ + 316, + 305, + 553, + 357 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\operatorname {l o g i t s} = \\mathcal {R} (x _ {a b}), \\\\ x _ {a} ^ {\\prime}, x _ {b} ^ {\\prime} = \\operatorname {T o p K} (\\operatorname {S o f t m a x} (\\log_ {i} i)) \\\\ \\operatorname {E x p e r t} \\left(x _ {a} ^ {\\prime}, x _ {b} ^ {\\prime}\\right) = \\operatorname {P W C o n v} \\left[ \\operatorname {P W C o n v} \\left(x _ {a} ^ {\\prime}\\right) \\times \\operatorname {P W C o n v} \\left(x _ {b} ^ {\\prime}\\right) \\right] \\tag {26} \\\\ \\end{array}", + "image_path": "34d5a6b39ed4f2d0efda79eb187a09581f21d2d9c28410b6ab26705d2c94fb35.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 362, + 554, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 362, + 554, + 493 + ], + "spans": [ + { + "bbox": [ + 313, + 362, + 554, + 493 + ], + "type": "text", + "content": "Multiple FRMs (LayerNorm-MoEBlock-LayerNorm-DepthFFN sequences) are stacked for deep feature extraction. 
For reconstruction, global contextual features from deep extraction combine with shallow features via residual connections, followed by PixelShuffle-based upsampling to produce high-resolution outputs. The model uses GELU activation, Layer Normalization. Their MoE layer dynamically routes features across numExperts " + }, + { + "bbox": [ + 313, + 362, + 554, + 493 + ], + "type": "inline_equation", + "content": "= 3" + }, + { + "bbox": [ + 313, + 362, + 554, + 493 + ], + "type": "text", + "content": ", selecting the top " + }, + { + "bbox": [ + 313, + 362, + 554, + 493 + ], + "type": "inline_equation", + "content": "k = 1" + }, + { + "bbox": [ + 313, + 362, + 554, + 493 + ], + "type": "text", + "content": " experts at training time, allowing a flexible and adaptive processing pipeline tailored specifically to input feature characteristics." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 498, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 555, + 713 + ], + "type": "text", + "content": "Training Strategy. The model is trained and tested on BasicSR [115] setting. First, the model is initially trained on DIV2K_LSDIR_x2, then further finetuned with DIV2K_LSDIR_x3 dataset for 500,000 iterations respectively, in which these scales are made with bicubic downsampling. The x4 scale model is finetuned on top of the x3 model over 500,000 iterations with the initial learning rate of " + }, + { + "bbox": [ + 313, + 498, + 555, + 713 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-3}" + }, + { + "bbox": [ + 313, + 498, + 555, + 713 + ], + "type": "text", + "content": " using the Adam optimizer. The learning rate decayed at iterations [250,000, 400,000, 450,000, 475,000]. The training pipeline included data augmentations such as random horizontal flips, vertical flips and rotations. 
The model is optimized using L1 Loss and Fast Fourier Transform (FFT) Loss [95] with 1.0 and 0.1 weights, respectively. All reported implementations are carried out using Python (version 3.9) programming language and PyTorch Framework, utilizing one RTX4090, 24GB VRAM and 16-core CPU. Training is conducted over approximately 23 days with a single GPU of batch size of 16." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 71, + 550, + 157 + ], + "blocks": [ + { + "bbox": [ + 61, + 71, + 550, + 157 + ], + "lines": [ + { + "bbox": [ + 61, + 71, + 550, + 157 + ], + "spans": [ + { + "bbox": [ + 61, + 71, + 550, + 157 + ], + "type": "image", + "image_path": "c40b65ab994e2395bc7a92b4cda211ba91920ed236cf7a59b3f243698618b855.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 61, + 159, + 550, + 276 + ], + "blocks": [ + { + "bbox": [ + 61, + 159, + 550, + 276 + ], + "lines": [ + { + "bbox": [ + 61, + 159, + 550, + 276 + ], + "spans": [ + { + "bbox": [ + 61, + 159, + 550, + 276 + ], + "type": "image", + "image_path": "01eacf3cccd113c242f920e122e9dd250100f74c8f5e7f603cf5c928a27f10c7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 61, + 277, + 549, + 346 + ], + "blocks": [ + { + "bbox": [ + 61, + 277, + 549, + 346 + ], + "lines": [ + { + "bbox": [ + 61, + 277, + 549, + 346 + ], + "spans": [ + { + "bbox": [ + 61, + 277, + 549, + 346 + ], + "type": "image", + "image_path": 
"a2c01cfe6e304254748277a8f823d85316e6c8e8774d19c15de4428bf13a838f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 78, + 350, + 97, + 369 + ], + "blocks": [ + { + "bbox": [ + 78, + 350, + 97, + 369 + ], + "lines": [ + { + "bbox": [ + 78, + 350, + 97, + 369 + ], + "spans": [ + { + "bbox": [ + 78, + 350, + 97, + 369 + ], + "type": "image", + "image_path": "f872827f0bbe062c88b104ed0c499d216e8b302b4a8aa46faf3793bedbc4cf18.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 100, + 355, + 177, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 355, + 177, + 365 + ], + "spans": [ + { + "bbox": [ + 100, + 355, + 177, + 365 + ], + "type": "text", + "content": "Adaptive Max Pooling" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 200, + 350, + 219, + 369 + ], + "blocks": [ + { + "bbox": [ + 200, + 350, + 219, + 369 + ], + "lines": [ + { + "bbox": [ + 200, + 350, + 219, + 369 + ], + "spans": [ + { + "bbox": [ + 200, + 350, + 219, + 369 + ], + "type": "image", + "image_path": "50d512d971059ff34fadef4244b80d50193d8309a9a13d88ca8dfe1dfda61946.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 170, + 407, + 439, + 418 + ], + "lines": [ + { + "bbox": [ + 170, + 407, + 439, + 418 + ], + "spans": [ + { + "bbox": [ + 170, + 407, + 439, + 418 + ], + "type": "text", + "content": "Figure 29. Team MILA: Network architecture of the proposed MVFMNet." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 220, + 356, + 274, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 356, + 274, + 365 + ], + "spans": [ + { + "bbox": [ + 220, + 356, + 274, + 365 + ], + "type": "text", + "content": "Local Variance" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 304, + 350, + 322, + 369 + ], + "blocks": [ + { + "bbox": [ + 304, + 350, + 322, + 369 + ], + "lines": [ + { + "bbox": [ + 304, + 350, + 322, + 369 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 322, + 369 + ], + "type": "image", + "image_path": "47a8f5767564f21bec0d29e6ae704dea3d6b3837b42286dffc841e399e9070d8.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 326, + 356, + 402, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 356, + 402, + 365 + ], + "spans": [ + { + "bbox": [ + 326, + 356, + 402, + 365 + ], + "type": "text", + "content": "Channel Concatenate" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 425, + 350, + 444, + 369 + ], + "blocks": [ + { + "bbox": [ + 425, + 350, + 444, + 369 + ], + "lines": [ + { + "bbox": [ + 425, + 350, + 444, + 369 + ], + "spans": [ + { + "bbox": [ + 425, + 350, + 444, + 369 + ], + "type": "image", + "image_path": "12903d3b3084e11b5f896b87149ea6ab71e21ca200bfcd42111d265b7224d8f0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 447, + 356, + 524, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 447, + 356, + 524, + 364 + ], + "spans": [ + { + "bbox": [ + 447, + 356, + 524, + 364 + ], + "type": "text", + "content": "Element-wise Addition" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 78, + 373, + 97, + 390 + ], + "blocks": [ + { + "bbox": [ + 78, + 373, + 97, + 390 + ], + 
"lines": [ + { + "bbox": [ + 78, + 373, + 97, + 390 + ], + "spans": [ + { + "bbox": [ + 78, + 373, + 97, + 390 + ], + "type": "image", + "image_path": "417ed3a4f6c7a940301d0c59980746b58cddeb2e843610fa81ff47d3e406ebca.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 101, + 376, + 174, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 376, + 174, + 386 + ], + "spans": [ + { + "bbox": [ + 101, + 376, + 174, + 386 + ], + "type": "text", + "content": "Nearest Up-sampling" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 200, + 373, + 219, + 390 + ], + "blocks": [ + { + "bbox": [ + 200, + 373, + 219, + 390 + ], + "lines": [ + { + "bbox": [ + 200, + 373, + 219, + 390 + ], + "spans": [ + { + "bbox": [ + 200, + 373, + 219, + 390 + ], + "type": "image", + "image_path": "2d645b7edb13434af06462871050d1113b7f8262449bfe3d9336cd691170f68d.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 222, + 377, + 264, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 377, + 264, + 386 + ], + "spans": [ + { + "bbox": [ + 222, + 377, + 264, + 386 + ], + "type": "text", + "content": "Chanel Split" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 304, + 373, + 322, + 390 + ], + "blocks": [ + { + "bbox": [ + 304, + 373, + 322, + 390 + ], + "lines": [ + { + "bbox": [ + 304, + 373, + 322, + 390 + ], + "spans": [ + { + "bbox": [ + 304, + 373, + 322, + 390 + ], + "type": "image", + "image_path": "5c4fffafcd87b05c0eb660d6ac051e73323321b85c6274fe2d1d425f63153e14.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 326, + 377, + 383, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 377, + 383, + 385 + ], + "spans": [ + { + "bbox": [ + 326, + 377, + 383, + 385 
+ ], + "type": "text", + "content": "GELU Activation" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 425, + 371, + 444, + 390 + ], + "blocks": [ + { + "bbox": [ + 425, + 371, + 444, + 390 + ], + "lines": [ + { + "bbox": [ + 425, + 371, + 444, + 390 + ], + "spans": [ + { + "bbox": [ + 425, + 371, + 444, + 390 + ], + "type": "image", + "image_path": "7b7ae85f98846211c1488657fe8c48f7df95a130ea033e4222c2cc126ad7abe3.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 447, + 377, + 523, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 447, + 377, + 523, + 385 + ], + "spans": [ + { + "bbox": [ + 447, + 377, + 523, + 385 + ], + "type": "text", + "content": "Element-wise Product" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 55, + 439, + 126, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 439, + 126, + 451 + ], + "spans": [ + { + "bbox": [ + 55, + 439, + 126, + 451 + ], + "type": "text", + "content": "4.28. BVIVSR" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 55, + 462, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 462, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 462, + 296, + 713 + ], + "type": "text", + "content": "Method Description. Their solution is built on the advances in state-of-the-art single-image super-resolution (SISR) methods [11, 18, 87, 141, 149], particularly the efficient Transformer-based models [52, 139], the continuous super-resolution approaches, such as HiIF [49, 52], and the knowledge distillation strategies [48, 50, 51]. They employ an efficient Transformer-based network architecture, as illustrated in Fig. 31, where the core component is the Hierarchical Encoding Transformer (HiET) layer. 
The HiET layer was first proposed in [52] and it is specifically designed to capture rich structural dependencies across various regions of the image, enabling the model to handle complex visual patterns effectively. To enhance the capacity of the model for multi-scale feature representations, each HiET layer is set with different window sizes, allowing it to attend to both local and global contexts. Furthermore, the overall architecture incorporates a modified U-Net structure, where skip connections are introduced between symmetric HiET layers at different depths. This design facilitates efficient multi-level feature fusion and ensures better preservation and reconstruction of fine-grained details" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 440, + 555, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 440, + 555, + 499 + ], + "spans": [ + { + "bbox": [ + 313, + 440, + 555, + 499 + ], + "type": "text", + "content": "in the super-resolved outputs. In addition, they also apply the multi-teacher knowledge distillation strategy [48] to improve the performance of the lightweight C2D-ISR model, where SRFormer [147], MambaIR [32] and EDSR [70] are employed as teacher networks." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 502, + 556, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 502, + 556, + 670 + ], + "spans": [ + { + "bbox": [ + 313, + 502, + 556, + 670 + ], + "type": "text", + "content": "Training Details. They use the DIV2K [102], 1000 2K images from BVI-AOM [82], Flickr2K [70] and 5000 images from LSDIR[64] as training dataset. For evaluation, they follow common practice and employ the DIV2K validation set (containing 100 images) [102]. 
The maximum learning rate is set to " + }, + { + "bbox": [ + 313, + 502, + 556, + 670 + ], + "type": "inline_equation", + "content": "4 \\times 10^{-4}" + }, + { + "bbox": [ + 313, + 502, + 556, + 670 + ], + "type": "text", + "content": ". The learning rate follows a cosine annealing schedule, gradually decreasing after an initial warm-up phase of 50 epochs. They use L1 loss and the Adam [54] optimization during training. Training and testing are implemented based on 4 NVIDIA 4090 GPUs. The model comprises 154.8K parameters with an input size of " + }, + { + "bbox": [ + 313, + 502, + 556, + 670 + ], + "type": "inline_equation", + "content": "64 \\times 64 \\times 3" + }, + { + "bbox": [ + 313, + 502, + 556, + 670 + ], + "type": "text", + "content": " and it was trained for 1000 epochs with 16 batch sizes per GPU. The training of their solution contains five stages:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 677, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 556, + 713 + ], + "type": "text", + "content": "- Training the teacher networks, including SRFormer [147], MambaIR [32] and EDSR [70], by using the original settings in their papers;" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 82, + 100, + 531, + 376 + ], + "blocks": [ + { + "bbox": [ + 82, + 100, + 531, + 376 + ], + "lines": [ + { + "bbox": [ + 82, + 100, + 531, + 376 + ], + "spans": [ + { + "bbox": [ + 82, + 100, + 531, + 376 + ], + "type": "image", + "image_path": 
"253833a7e355d218a6f8858267ec14826e3afdeb8733a9e206f03c2e38f8543b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 127, + 379, + 482, + 391 + ], + "lines": [ + { + "bbox": [ + 127, + 379, + 482, + 391 + ], + "spans": [ + { + "bbox": [ + 127, + 379, + 482, + 391 + ], + "type": "text", + "content": "Figure 30. Team AiMF_SR: Main Figure of Proposed Architecture, Mixture of Efficient Attention." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 89, + 402, + 532, + 506 + ], + "blocks": [ + { + "bbox": [ + 89, + 402, + 532, + 506 + ], + "lines": [ + { + "bbox": [ + 89, + 402, + 532, + 506 + ], + "spans": [ + { + "bbox": [ + 89, + 402, + 532, + 506 + ], + "type": "image", + "image_path": "6ddd8335873af8c5a39067d510e2bb84a138cae90a3d4c71cc8cbfc3b65e5ffc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 204, + 514, + 404, + 525 + ], + "lines": [ + { + "bbox": [ + 204, + 514, + 404, + 525 + ], + "spans": [ + { + "bbox": [ + 204, + 514, + 404, + 525 + ], + "type": "text", + "content": "Figure 31. Team BVIVSR: The structure of the method." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 536, + 295, + 680 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 55, + 536, + 295, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 536, + 295, + 571 + ], + "spans": [ + { + "bbox": [ + 55, + 536, + 295, + 571 + ], + "type": "text", + "content": "- The teacher aggregation of multi-teacher knowledge distillation (MTKD) strategy [48] was adapted to the above teacher networks to obtain an enhanced teacher network;" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 573, + 295, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 573, + 295, + 618 + ], + "spans": [ + { + "bbox": [ + 55, + 573, + 295, + 618 + ], + "type": "text", + "content": "- Training the lightweight C2D-ISR model [52] on continuous scales i.e, from " + }, + { + "bbox": [ + 55, + 573, + 295, + 618 + ], + "type": "inline_equation", + "content": "\\times 2" + }, + { + "bbox": [ + 55, + 573, + 295, + 618 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 55, + 573, + 295, + 618 + ], + "type": "inline_equation", + "content": "\\times 4" + }, + { + "bbox": [ + 55, + 573, + 295, + 618 + ], + "type": "text", + "content": ", to learn the correlation between multiple scales and better recover high-frequency details;" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 620, + 295, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 620, + 295, + 654 + ], + "spans": [ + { + "bbox": [ + 55, + 620, + 295, + 654 + ], + "type": "text", + "content": "- The learned C2D-ISR model was distilled by the MTKD strategy [48] with their enhanced teacher network to obtain the enhanced student model;" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 656, + 295, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 656, + 
295, + 680 + ], + "spans": [ + { + "bbox": [ + 56, + 656, + 295, + 680 + ], + "type": "text", + "content": "- Finetuning the enhanced student model by increasing the patch size from " + }, + { + "bbox": [ + 56, + 656, + 295, + 680 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 56, + 656, + 295, + 680 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 56, + 656, + 295, + 680 + ], + "type": "inline_equation", + "content": "128 \\times 128" + }, + { + "bbox": [ + 56, + 656, + 295, + 680 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 536, + 397, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 536, + 397, + 547 + ], + "spans": [ + { + "bbox": [ + 314, + 536, + 397, + 547 + ], + "type": "text", + "content": "4.29.CUIT_HTT" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 558, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 558, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 558, + 555, + 713 + ], + "type": "text", + "content": "General Method Description. The overall architecture of the proposed method is illustrated in Fig. 32(a), which consists of three main components: the shallow feature extraction module, the deep feature extraction module, and the reconstruction and upsampling module. 
The shallow feature extraction module employs a BSConv [34] module to extract low-level features such as edges and textures from the input image " + }, + { + "bbox": [ + 313, + 558, + 555, + 713 + ], + "type": "inline_equation", + "content": "I^{in} \\in \\mathbb{R}^{3 \\times H \\times W}" + }, + { + "bbox": [ + 313, + 558, + 555, + 713 + ], + "type": "text", + "content": ", mapping it to the feature space " + }, + { + "bbox": [ + 313, + 558, + 555, + 713 + ], + "type": "inline_equation", + "content": "f^0 \\in \\mathbb{R}^{C \\times H \\times W}" + }, + { + "bbox": [ + 313, + 558, + 555, + 713 + ], + "type": "text", + "content": " for further processing. The extracted shallow features are then fed into the deep feature extraction module, which is composed of multiple Frequency-Segmented Attention Blocks (FSABs) designed in this work. The outputs of each FSAB are concatenated" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 75, + 531, + 170 + ], + "blocks": [ + { + "bbox": [ + 59, + 75, + 531, + 170 + ], + "lines": [ + { + "bbox": [ + 59, + 75, + 531, + 170 + ], + "spans": [ + { + "bbox": [ + 59, + 75, + 531, + 170 + ], + "type": "image", + "image_path": "f515d21064efcb5f99d823286f26e5e7ffc92eccff0a11ad3be8c813baca6d94.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 58, + 171, + 159, + 352 + ], + "blocks": [ + { + "bbox": [ + 58, + 171, + 159, + 352 + ], + "lines": [ + { + "bbox": [ + 58, + 171, + 159, + 352 + ], + "spans": [ + { + "bbox": [ + 58, + 171, + 159, 
+ 352 + ], + "type": "image", + "image_path": "e71224c1294caaddfff3a7868d78eb34cb3399b9a3e70cfae73f0f5093a5bc12.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 358, + 555, + 392 + ], + "lines": [ + { + "bbox": [ + 55, + 358, + 555, + 392 + ], + "spans": [ + { + "bbox": [ + 55, + 358, + 555, + 392 + ], + "type": "text", + "content": "Figure 32. Team CUIT_HT: Schematic Diagram of the Method. (a) Overall Architecture of the Model; (b) Frequency-Segmented Attention Block (FSAB); (c) Schematic of the Enhanced Large-kernel Convolution Block (ELCB); (d) Mechanism of Frequency-Segmented Attention (FSA); (e) Frequency Division and Frequency Recombination." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 160, + 173, + 276, + 348 + ], + "blocks": [ + { + "bbox": [ + 160, + 173, + 276, + 348 + ], + "lines": [ + { + "bbox": [ + 160, + 173, + 276, + 348 + ], + "spans": [ + { + "bbox": [ + 160, + 173, + 276, + 348 + ], + "type": "image", + "image_path": "cb4d48375367e6c3831213b678c17fc76e97295cb24d5c23662571f9e2f19896.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 277, + 173, + 531, + 247 + ], + "blocks": [ + { + "bbox": [ + 277, + 173, + 531, + 247 + ], + "lines": [ + { + "bbox": [ + 277, + 173, + 531, + 247 + ], + "spans": [ + { + "bbox": [ + 277, + 173, + 531, + 247 + ], + "type": "image", + "image_path": "0bdef5b2be98fa47128c19bf1077c8c10251c4776405671e6a96c0507aeb5481.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 277, + 248, + 531, + 351 + ], + "blocks": [ + { + "bbox": [ + 277, + 248, + 531, + 351 + ], + "lines": [ + { + "bbox": [ + 277, + 248, + 531, + 351 + ], + "spans": [ + { + "bbox": [ + 277, + 248, + 531, + 351 + ], + "type": "image", + "image_path": 
"e8b1e59898d3b68eb6fd482318b1fd061e804edafc865f2b99d918a5991b38f5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 413, + 294, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 413, + 294, + 593 + ], + "spans": [ + { + "bbox": [ + 54, + 413, + 294, + 593 + ], + "type": "text", + "content": "along the channel dimension and adjusted using a convolutional module group, constituting the deep feature extraction process. As shown in Fig. 32(b), the FSAB structure includes a Concat operation for channel concatenation and a ConvB module group, which consists of a " + }, + { + "bbox": [ + 54, + 413, + 294, + 593 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 54, + 413, + 294, + 593 + ], + "type": "text", + "content": " convolution, a GELU activation function, and a BSCov stacked sequentially. Finally, the output of the shallow feature extraction module is added element-wise to the output of the deep feature extraction module via a skip connection and passed to the reconstruction and upsampling module. This module upsamples the feature space information " + }, + { + "bbox": [ + 54, + 413, + 294, + 593 + ], + "type": "inline_equation", + "content": "f^{out} \\in \\mathbb{R}^{C \\times H \\times W}" + }, + { + "bbox": [ + 54, + 413, + 294, + 593 + ], + "type": "text", + "content": " and maps it to the high-resolution output image " + }, + { + "bbox": [ + 54, + 413, + 294, + 593 + ], + "type": "inline_equation", + "content": "I^{SR} \\in \\mathbb{R}^{3 \\times scale \\times H \\times scale \\times W}" + }, + { + "bbox": [ + 54, + 413, + 294, + 593 + ], + "type": "text", + "content": ", where scale is the upscaling factor. In this work, the PixelShuffle method is utilized for upsampling." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 54, + 594, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 594, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 54, + 594, + 295, + 714 + ], + "type": "text", + "content": "The Frequency-Segmented Attention Block (FSAB) primarily consists of an information distillation architecture for local feature processing and the proposed Frequency-Segmented Attention (FSA) mechanism for global feature processing. The overall architecture of FSA is illustrated in Fig. 32 (d). The input feature map is first transformed into the frequency domain via the Fast Fourier Transform (FFT), enabling global processing in the spatial domain through frequency domain operations. Inspired by windowed attention, the FDivision operation partitions the frequency spec" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 413, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 413, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 413, + 555, + 713 + ], + "type": "text", + "content": "trum into multiple windows, which are concatenated along the channel dimension. A grouped convolution is then applied to process features in different frequency ranges using distinct weights. Subsequently, the FRecombination operation reassembles the segmented frequency windows back into the spectrum. A convolutional layer is applied, and the result is added element-wise to the original spectrum. Finally, the Inverse Fast Fourier Transform (IFFT) is used to convert the processed features back to the spatial domain, and the output is obtained through elementwise multiplication with the original input. As for the information distillation architecture, they adopt the structure of the Residual Feature Distillation Block (RFDB) from RFDN [71], as shown in Fig. 32. (b). However, they replace the convolutional layers with Enhanced Large-kernel Convolution Blocks (ELCB). 
This module employs large-kernel depthwise convolution on half of the channels and pointwise convolution on the full channels, achieving a large receptive field without significantly increasing the number of parameters. Additionally, structural reparameterization is utilized during training, where multiple branches with different receptive fields are employed. During inference, these branches are equivalently replaced with a single large-kernel convolution module, thereby enhancing the model's learning capability without increasing inference cost." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 228 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 228 + ], + "type": "text", + "content": "Train details. They utilize the DIV2K [4] and Flickr2k [101] dataset and the first 10K images from the LSDIR [64] dataset as the training set for their model. During training, the dataset undergoes random horizontal flipping and " + }, + { + "bbox": [ + 55, + 72, + 294, + 228 + ], + "type": "inline_equation", + "content": "90^{\\circ}" + }, + { + "bbox": [ + 55, + 72, + 294, + 228 + ], + "type": "text", + "content": " rotation. The mini-batch size and input patch size are set to 64 and " + }, + { + "bbox": [ + 55, + 72, + 294, + 228 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 55, + 72, + 294, + 228 + ], + "type": "text", + "content": ", respectively. 
The model is optimized using the L1 loss function and the Adam optimizer, with an initial learning rate of " + }, + { + "bbox": [ + 55, + 72, + 294, + 228 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-3}" + }, + { + "bbox": [ + 55, + 72, + 294, + 228 + ], + "type": "text", + "content": ". The learning rate follows a cosine annealing decay schedule over a total of 1000K iterations. Subsequently, the model is fine-tuned using the L2 loss to achieve improved performance. Training is conducted using PyTorch 1.12.1 on a Tesla P100 16G GPU." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 238, + 132, + 249 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 238, + 132, + 249 + ], + "spans": [ + { + "bbox": [ + 55, + 238, + 132, + 249 + ], + "type": "text", + "content": "4.30. GXZY.AI" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 256, + 295, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 256, + 295, + 422 + ], + "spans": [ + { + "bbox": [ + 55, + 256, + 295, + 422 + ], + "type": "text", + "content": "General Method Description. The GXZY AI team proposed a Parameter-free Vision Mamba, as shown in Fig. 33. The work is inspired by MambaIR [33], SPAN [112] and DVMSR [59], PFVM consists of three parts, shallow feature extraction, deep feature extraction and reconstruction module. Shallow feature extraction is achieved by " + }, + { + "bbox": [ + 55, + 256, + 295, + 422 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 256, + 295, + 422 + ], + "type": "text", + "content": " convolution, followed by the use of stacked Residue State Space Blocks (RSSBs), which contain the Vision State Space Module (VSSM) to extract deeper features through the capability of Mamba long-range modeling. 
Then the shallow and deep features are aggregated by a " + }, + { + "bbox": [ + 55, + 256, + 295, + 422 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 256, + 295, + 422 + ], + "type": "text", + "content": " convolution along with residual concatenation, and finally upsampling is achieved through a sub-pixel convolutional layer to reconstruct the high resolution image." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 425, + 295, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 425, + 295, + 532 + ], + "spans": [ + { + "bbox": [ + 55, + 425, + 295, + 532 + ], + "type": "text", + "content": "As shown in Fig. 34, different from the RSSB used in DVMSR, PFVM does not use stacked ViMM modules, but follows the design paradigm of the RSSB in MambaIR, which differs from MambaIR in that 3-residue branching is used in order to maximize the ability of residual learning. In order to obtain better PSNR with approximate inference time, the convolution layer adopts the bottleneck structure, and the channel attention used in MambaIR is replaced by a parameter-free attention." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 533, + 295, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 533, + 295, + 617 + ], + "spans": [ + { + "bbox": [ + 55, + 533, + 295, + 617 + ], + "type": "text", + "content": "Training Strategy. In the training phase, the GXZY AI team uses the LSDIR [64] dataset for training and the DIV2K [3] validation set for validation. The images in the training set are first cropped with a step size of 240 and a size of 480 to get a series of cropped images. The model was trained on 2 NVIDIA RTX 3090 GPUs. 
The details of the training steps are as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 618, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 618, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 618, + 295, + 713 + ], + "type": "text", + "content": "1. The HR images are randomly cropped to size 192, and the dataset is augmented using random flipping and rotation. The model is trained from scratch with a batch size set to 64, using the Adam optimizer with the learning rate set to 0.0001, " + }, + { + "bbox": [ + 56, + 618, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 56, + 618, + 295, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 56, + 618, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.99" + }, + { + "bbox": [ + 56, + 618, + 295, + 713 + ], + "type": "text", + "content": ", and a Multi-StepLR scheduler with the learning rate halved for every 200,000 iterations for a total of 1,000,000 iterations. The loss function uses L1 loss." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 314, + 72, + 553, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 553, + 156 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 553, + 156 + ], + "type": "text", + "content": "2. On the basis of the first step, the model with the optimal PSNR on the DIV2K validation set is loaded as the pre-training model, the size of HR image cropping is adjusted to 256, the learning rate is 0.0002, the learning rate is halved for every 100,000 iterations, and the loss function is still used for 1,000,000 iterations with L1 loss." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 165, + 369, + 177 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 165, + 369, + 177 + ], + "spans": [ + { + "bbox": [ + 313, + 165, + 369, + 177 + ], + "type": "text", + "content": "4.31. IPCV" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 182, + 553, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 182, + 553, + 338 + ], + "spans": [ + { + "bbox": [ + 313, + 182, + 553, + 338 + ], + "type": "text", + "content": "This team uses HiT-SR: Hierarchical Transformer for Efficient Image Super-Resolution [140] for this challenge. The Hierarchical Transformer for Efficient Image Super-Resolution (HiT-SR) is a deep learning model designed to upscale low-resolution (LR) images into high-resolution (HR) outputs while maintaining efficiency and high-quality reconstruction. Unlike traditional convolutional neural networks (CNNs), which struggle to capture long-range dependencies, HiT-SR employs a hierarchical self-attention mechanism that efficiently processes multiscale image features. This allows the model to integrate local and global information, improving image detail reconstruction while reducing computational costs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 339, + 554, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 339, + 554, + 458 + ], + "spans": [ + { + "bbox": [ + 313, + 339, + 554, + 458 + ], + "type": "text", + "content": "At the core of the network is a hierarchical feature learning process, where image features are extracted and refined progressively through multiple stages. Instead of applying full-resolution self-attention, which is memory intensive, HiT-SR reduces token complexity using patch merging and downsampling modules, allowing efficient computation without loss of essential information. 
The model further refines these hierarchical features through multiscale self-attention mechanisms, ensuring that fine-grained details and global structures are effectively captured." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 459, + 554, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 459, + 554, + 567 + ], + "spans": [ + { + "bbox": [ + 313, + 459, + 554, + 567 + ], + "type": "text", + "content": "For the final super-resolution reconstruction, HiT-SR aggregates and progressively upsamples the processed features. This multistage refinement approach ensures that high-frequency details are preserved while preventing artifacts common in naive upsampling techniques. The resulting HR image maintains sharp edges, realistic textures, and minimal distortions. They have used available pre-trained model weights [134] on the low resolution images of the test data set and predicted high resolution images." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 575, + 361, + 587 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 575, + 361, + 587 + ], + "spans": [ + { + "bbox": [ + 313, + 575, + 361, + 587 + ], + "type": "text", + "content": "4.32. X-L" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 594, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 554, + 713 + ], + "type": "text", + "content": "General Method Description. Their proposed partial permuted self-attention network (PPSA-Net) is shown in Fig. 35. PPSA-Net is inspired by two works: SR-Former [147] and PartialConv [9]. SRFormer is a lightweight super-resolution (SR) approach, but it inevitably still has significant redundancy in feature dimensions. To address this, they combine the strengths of PartialConv to further reduce the complexity and the computational cost. 
Specifically, they use a feature encoder to process the low-resolution image and feed it to four partial per" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 83, + 480, + 266 + ], + "blocks": [ + { + "bbox": [ + 133, + 83, + 480, + 266 + ], + "lines": [ + { + "bbox": [ + 133, + 83, + 480, + 266 + ], + "spans": [ + { + "bbox": [ + 133, + 83, + 480, + 266 + ], + "type": "image", + "image_path": "41a39eaf7e655d5f51d27cf6b8fee4f71730fb177f34e3c0bae53e75a366f369.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 210, + 275, + 400, + 286 + ], + "lines": [ + { + "bbox": [ + 210, + 275, + 400, + 286 + ], + "spans": [ + { + "bbox": [ + 210, + 275, + 400, + 286 + ], + "type": "text", + "content": "Figure 33. Team GXZY.AI: The structure of PFVM." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 132, + 298, + 479, + 488 + ], + "blocks": [ + { + "bbox": [ + 132, + 298, + 479, + 488 + ], + "lines": [ + { + "bbox": [ + 132, + 298, + 479, + 488 + ], + "spans": [ + { + "bbox": [ + 132, + 298, + 479, + 488 + ], + "type": "image", + "image_path": "04628618f2817ad511739ed0c8edd934bec4a877efa1648b1428c4283d5c12db.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 165, + 503, + 444, + 514 + ], + "lines": [ + { + "bbox": [ + 165, + 503, + 444, + 514 + ], + "spans": [ + { + "bbox": [ + 165, + 503, + 444, + 514 + ], + "type": "text", + "content": "Figure 34. 
Team GXZY AI: The structural details of MambaIR and DVMSR." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 536, + 296, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 536, + 296, + 680 + ], + "spans": [ + { + "bbox": [ + 55, + 536, + 296, + 680 + ], + "type": "text", + "content": "muted self-attention (PPSA) layers, before finally feeding it into a feature decoder to obtain the final result. In more detail, within each PPSA layer, they use channel split to divide the original features into two sub-features: one comprising " + }, + { + "bbox": [ + 55, + 536, + 296, + 680 + ], + "type": "inline_equation", + "content": "1/4" + }, + { + "bbox": [ + 55, + 536, + 296, + 680 + ], + "type": "text", + "content": " of the channels and the other comprising " + }, + { + "bbox": [ + 55, + 536, + 296, + 680 + ], + "type": "inline_equation", + "content": "3/4" + }, + { + "bbox": [ + 55, + 536, + 296, + 680 + ], + "type": "text", + "content": " of the channels. The " + }, + { + "bbox": [ + 55, + 536, + 296, + 680 + ], + "type": "inline_equation", + "content": "1/4" + }, + { + "bbox": [ + 55, + 536, + 296, + 680 + ], + "type": "text", + "content": " sub-feature is processed by a permuted self-attention block [147], while the " + }, + { + "bbox": [ + 55, + 536, + 296, + 680 + ], + "type": "inline_equation", + "content": "3/4" + }, + { + "bbox": [ + 55, + 536, + 296, + 680 + ], + "type": "text", + "content": " sub-feature remains unchanged. After processing, the two sub-features are concatenated back together. This design allows us to efficiently reduce computational overhead while maintaining the model's ability to capture both local and global information, leading to high-quality SR results." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 689, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 715 + ], + "type": "text", + "content": "Training details. They follow the same training procedure as SRFormer [147]. However, they conduct their training" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 314, + 536, + 457, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 536, + 457, + 548 + ], + "spans": [ + { + "bbox": [ + 314, + 536, + 457, + 548 + ], + "type": "text", + "content": "using a single NVIDIA 4090 GPU." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 562, + 409, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 562, + 409, + 574 + ], + "spans": [ + { + "bbox": [ + 313, + 562, + 409, + 574 + ], + "type": "text", + "content": "4.33.Quantum_Res" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 582, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 582, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 582, + 555, + 713 + ], + "type": "text", + "content": "Method Details. In this work, they propose a novel student-teacher framework for super-resolution, as shown in Fig. 36 that enables a lightweight student model to achieve better performance comparable to heavier models. Specifically, to adopt this architecture, they used MambaIRv2-Light [32] as the student model, while MambaIRv2-base [32] serves as the teacher. While they use MambaIRv2-light as an efficiency, their key contribution is demonstrating that a guided student-teacher learning strategy can significantly improve SR performance while keeping model complexity low. 
[108]" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 104, + 70, + 509, + 182 + ], + "blocks": [ + { + "bbox": [ + 104, + 70, + 509, + 182 + ], + "lines": [ + { + "bbox": [ + 104, + 70, + 509, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 70, + 509, + 182 + ], + "type": "image", + "image_path": "496abd7bd595907443e0670187e23f1f4d56c40d11a3074d787694ea1ce40318.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 195, + 190, + 414, + 202 + ], + "lines": [ + { + "bbox": [ + 195, + 190, + 414, + 202 + ], + "spans": [ + { + "bbox": [ + 195, + 190, + 414, + 202 + ], + "type": "text", + "content": "Figure 35. Team X-L: Overview of the proposed PPSA-Net." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 57, + 220, + 296, + 312 + ], + "blocks": [ + { + "bbox": [ + 57, + 220, + 296, + 312 + ], + "lines": [ + { + "bbox": [ + 57, + 220, + 296, + 312 + ], + "spans": [ + { + "bbox": [ + 57, + 220, + 296, + 312 + ], + "type": "image", + "image_path": "5f12b3c080f9cb9562c8a1755391e2e1c37748366d569130074f4dda3ff55992.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 323, + 296, + 433 + ], + "lines": [ + { + "bbox": [ + 55, + 323, + 296, + 433 + ], + "spans": [ + { + "bbox": [ + 55, + 323, + 296, + 433 + ], + "type": "text", + "content": "Figure 36. Team Quantum_Res: The overall pipeline of efficient super-resolution approach, which employs a student-teacher training paradigm. 
The high-capacity Teacher Network (MambaIRv2-B) learning is transferred to the lightweight Student Network (MambaIRv2-Light) using knowledge distillation. The student network is optimized using L1 loss to ensure accurate superresolution while maintaining efficiency. The input low-resolution (LR) database serves as the training input, guiding the student model to achieve high-fidelity reconstruction with reduced computational complexity." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 444, + 295, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 444, + 295, + 540 + ], + "spans": [ + { + "bbox": [ + 55, + 444, + 295, + 540 + ], + "type": "text", + "content": "The student model extracts the initial low-level features from the input low-resolution image using the " + }, + { + "bbox": [ + 55, + 444, + 295, + 540 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 444, + 295, + 540 + ], + "type": "text", + "content": " convolutional layer. The core of the network comprises a series of Attentive State-Space Blocks (ASSBs) [32] to capture long-range dependencies efficiently. For each block, residual connections are used to facilitate stable gradient propagation. Finally, a pixel-shuffle-based upsampling module reconstructs the final high-resolution image. [32]" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 541, + 295, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 541, + 295, + 590 + ], + "spans": [ + { + "bbox": [ + 55, + 541, + 295, + 590 + ], + "type": "text", + "content": "The teacher model, MambaIRv2, follows the same architectural design but with increased depth and wider feature dimensions. This model has significantly more parameters and serves as an upper-bound reference for the student." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 591, + 295, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 591, + 295, + 687 + ], + "spans": [ + { + "bbox": [ + 55, + 591, + 295, + 687 + ], + "type": "text", + "content": "Teacher-Guided Inference. The teacher model remains frozen throughout training and is only used as a qualitative reference to validate architectural choices and improvements. The student model inherits refined architectural principles from the teacher rather than weight transfer or feature alignment. This allows the student to retain its original lightweight nature while benefiting from structural knowledge obtained from a larger-capacity model [108]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": "Inference Strategy. During inference, an efficient patch-based processing method is applied to handle high-" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 223, + 553, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 223, + 553, + 282 + ], + "spans": [ + { + "bbox": [ + 313, + 223, + 553, + 282 + ], + "type": "text", + "content": "resolution images. Given an input image, it is divided into overlapping patches. Each patch is processed independently by the student network, and final predictions are blended using a weighted averaging scheme to ensure seamless reconstruction. [32]" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 283, + 555, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 283, + 555, + 462 + ], + "spans": [ + { + "bbox": [ + 313, + 283, + 555, + 462 + ], + "type": "text", + "content": "Training Details. The student model is initialized using pre-trained weights of MambaIRv2-light. 
The teacher model is loaded with pre-trained weights from a high-performing MambaIRv2-base variant. Fine-tuning was performed on DIV2K and LSDIR, with the number of feature channels set to 48. The training was conducted on patches of size " + }, + { + "bbox": [ + 313, + 283, + 555, + 462 + ], + "type": "inline_equation", + "content": "192 \\times 192" + }, + { + "bbox": [ + 313, + 283, + 555, + 462 + ], + "type": "text", + "content": " extracted from high-resolution images, using a batch size of 8. The model is finetuned by minimizing the L1 loss function using the Adam optimizer. The initial learning rate is set to " + }, + { + "bbox": [ + 313, + 283, + 555, + 462 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 313, + 283, + 555, + 462 + ], + "type": "text", + "content": " and is reduced when training iterations reach specific milestones, following a Multi-StepLR decay strategy with a factor of 0.5. The total number of iterations is 150K. The teacher model is only used as a reference for guiding architectural refinement and remains frozen throughout the training." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 468, + 384, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 468, + 384, + 480 + ], + "spans": [ + { + "bbox": [ + 314, + 468, + 384, + 480 + ], + "type": "text", + "content": "4.34. SylabSR" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 486, + 554, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 486, + 554, + 617 + ], + "spans": [ + { + "bbox": [ + 313, + 486, + 554, + 617 + ], + "type": "text", + "content": "Method. Inspired by RLFN [56] and VARSR [88], they propose an AutoRegressive Residual Local Feature Network (AR-RLFN) to implement a two-stage super-resolution framework. 
Specifically, they build a lightweight version of RLFN targeting " + }, + { + "bbox": [ + 313, + 486, + 554, + 617 + ], + "type": "inline_equation", + "content": "2 \\times" + }, + { + "bbox": [ + 313, + 486, + 554, + 617 + ], + "type": "text", + "content": " super-resolution, meaning that the final " + }, + { + "bbox": [ + 313, + 486, + 554, + 617 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 313, + 486, + 554, + 617 + ], + "type": "text", + "content": " SR image is generated from an intermediate " + }, + { + "bbox": [ + 313, + 486, + 554, + 617 + ], + "type": "inline_equation", + "content": "2 \\times" + }, + { + "bbox": [ + 313, + 486, + 554, + 617 + ], + "type": "text", + "content": " SR image produced by the same model. The overall framework of AR-RLFN is shown in Fig. 37. Although the model needs to be run twice, the " + }, + { + "bbox": [ + 313, + 486, + 554, + 617 + ], + "type": "inline_equation", + "content": "2 \\times" + }, + { + "bbox": [ + 313, + 486, + 554, + 617 + ], + "type": "text", + "content": " SR task requires significantly fewer parameters and FLOPs compared to the original one, making the approach efficient overall." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "text", + "content": "The modified structure of RLFN is further inspired by R2Net [91]. Benefiting from the two-stage strategy, their model is able to operate with fewer parameters. In their framework, they adopt three Residual Local Feature Blocks (RLFBs) with a reduced number of channels compared to the original version. Additionally, they replace ReLU with LeakyReLU to mitigate gradient vanishing. 
For reparameterization, they employ the Residual-in-Residual Rep Block" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 72, + 294, + 280 + ], + "blocks": [ + { + "bbox": [ + 59, + 72, + 294, + 280 + ], + "lines": [ + { + "bbox": [ + 59, + 72, + 294, + 280 + ], + "spans": [ + { + "bbox": [ + 59, + 72, + 294, + 280 + ], + "type": "image", + "image_path": "17821671883df0836f9272ab356f13d8f4e20543a0f3382103eb12be5c27e5b0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 292, + 295, + 315 + ], + "lines": [ + { + "bbox": [ + 55, + 292, + 295, + 315 + ], + "spans": [ + { + "bbox": [ + 55, + 292, + 295, + 315 + ], + "type": "text", + "content": "Figure 37. Team SylabSR: The structure of (up) AR-RLFN, (a) RLFB, (b) RRRB and (c) its reparameterization." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 337, + 295, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 337, + 295, + 372 + ], + "spans": [ + { + "bbox": [ + 55, + 337, + 295, + 372 + ], + "type": "text", + "content": "(RRRB) [26] for improved compression, which reduces the number of parameters during inference by approximately " + }, + { + "bbox": [ + 55, + 337, + 295, + 372 + ], + "type": "inline_equation", + "content": "45\\%" + }, + { + "bbox": [ + 55, + 337, + 295, + 372 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 373, + 296, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 373, + 296, + 422 + ], + "spans": [ + { + "bbox": [ + 55, + 373, + 296, + 422 + ], + "type": "text", + "content": "Training Strategy. They train their network on DIV2K [104] and LSDIR [64] datasets, and augment the training data using random flipping and rotation. The training process is divided into three stages:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 422, + 296, + 625 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 56, + 422, + 296, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 422, + 296, + 517 + ], + "spans": [ + { + "bbox": [ + 56, + 422, + 296, + 517 + ], + "type": "text", + "content": "1. HR patches of size " + }, + { + "bbox": [ + 56, + 422, + 296, + 517 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 56, + 422, + 296, + 517 + ], + "type": "text", + "content": " are randomly cropped from the ground truth DIV2K images. In this stage, the model performs " + }, + { + "bbox": [ + 56, + 422, + 296, + 517 + ], + "type": "inline_equation", + "content": "2 \\times" + }, + { + "bbox": [ + 56, + 422, + 296, + 517 + ], + "type": "text", + "content": " super-resolution. The number of channels in the RRRB is set to 12, and the batch size is set to 32. They use the Adam optimizer to minimize the Charbonnier loss, with the learning rate set to " + }, + { + "bbox": [ + 56, + 422, + 296, + 517 + ], + "type": "inline_equation", + "content": "5\\mathrm{e}^{-4}" + }, + { + "bbox": [ + 56, + 422, + 296, + 517 + ], + "type": "text", + "content": ". The training runs for 100k iterations, and the learning rate is halved every 20k iterations." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 518, + 295, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 518, + 295, + 566 + ], + "spans": [ + { + "bbox": [ + 56, + 518, + 295, + 566 + ], + "type": "text", + "content": "2. HR patches of size " + }, + { + "bbox": [ + 56, + 518, + 295, + 566 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 56, + 518, + 295, + 566 + ], + "type": "text", + "content": " are randomly cropped from the ground truth DIV2K images. The model again performs " + }, + { + "bbox": [ + 56, + 518, + 295, + 566 + ], + "type": "inline_equation", + "content": "2 \\times" + }, + { + "bbox": [ + 56, + 518, + 295, + 566 + ], + "type": "text", + "content": " super-resolution in this stage. The remaining configurations are the same as in Stage 1." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 566, + 295, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 566, + 295, + 625 + ], + "spans": [ + { + "bbox": [ + 56, + 566, + 295, + 625 + ], + "type": "text", + "content": "3. HR patches of size " + }, + { + "bbox": [ + 56, + 566, + 295, + 625 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 56, + 566, + 295, + 625 + ], + "type": "text", + "content": " are randomly cropped from both the DIV2K and LSDIR datasets. In this stage, they use the Adam optimizer to minimize MSE loss, with the learning rate set to " + }, + { + "bbox": [ + 56, + 566, + 295, + 625 + ], + "type": "inline_equation", + "content": "2\\mathrm{e}^{-4}" + }, + { + "bbox": [ + 56, + 566, + 295, + 625 + ], + "type": "text", + "content": ". The training runs for 50k iterations, and the learning rate is halved every 10k iterations." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 635, + 127, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 635, + 127, + 647 + ], + "spans": [ + { + "bbox": [ + 55, + 635, + 127, + 647 + ], + "type": "text", + "content": "4.35. NJUPCA" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "text", + "content": "General Method Description. Inspired by SPAN [112], they propose the Spatial Frequency Network (SFNet), which fully leverages both spatial and frequency domain representations. SFNet integrates Frequency Knowledge Miner (FKM) modules after each Spatial Attention Block" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 321, + 72, + 548, + 175 + ], + "blocks": [ + { + "bbox": [ + 321, + 72, + 548, + 175 + ], + "lines": [ + { + "bbox": [ + 321, + 72, + 548, + 175 + ], + "spans": [ + { + "bbox": [ + 321, + 72, + 548, + 175 + ], + "type": "image", + "image_path": "b1af22d432546be3d58f726de7b0d76a5692472560a730d2902ecb22dbc465ac.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 186, + 553, + 208 + ], + "lines": [ + { + "bbox": [ + 313, + 186, + 553, + 208 + ], + "spans": [ + { + "bbox": [ + 313, + 186, + 553, + 208 + ], + "type": "text", + "content": "Figure 38. Team NJUPCA: The detailed architecture of the designed FKM." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 218, + 553, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 218, + 553, + 277 + ], + "spans": [ + { + "bbox": [ + 313, + 218, + 553, + 277 + ], + "type": "text", + "content": "(SPAB) to capture frequency domain features, complementing the spatial features extracted by SPAB. This parallel design enables the network to effectively learn and combine spatial and frequency domain representations, enhancing the performance of super-resolution reconstruction." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 278, + 554, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 278, + 554, + 373 + ], + "spans": [ + { + "bbox": [ + 313, + 278, + 554, + 373 + ], + "type": "text", + "content": "As illustrated in Fig. 38, the frequency knowledge miner (FKM) is designed to learn frequency representation from input, which comprises two core components: multi-band frequency learner (MBFL) and full-frequency adjustment learner (FFAL). MBFL aims to enhancing frequency representation by focusing on distinct frequency bands, while FFAL adjusts frequency-domain features from a full-frequency perspective." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 373, + 553, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 373, + 553, + 398 + ], + "spans": [ + { + "bbox": [ + 313, + 373, + 553, + 398 + ], + "type": "text", + "content": "Training Details. 
They employ two-stage training paradigm:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 399, + 553, + 519 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 314, + 399, + 553, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 399, + 553, + 471 + ], + "spans": [ + { + "bbox": [ + 314, + 399, + 553, + 471 + ], + "type": "text", + "content": "- **Stage I - Foundation Training:** Randomly initialized weights are trained on DIV2K and full LSDIR datasets using " + }, + { + "bbox": [ + 314, + 399, + 553, + 471 + ], + "type": "inline_equation", + "content": "128 \\times 128" + }, + { + "bbox": [ + 314, + 399, + 553, + 471 + ], + "type": "text", + "content": " HR patches. Configuration: Adam optimizer (" + }, + { + "bbox": [ + 314, + 399, + 553, + 471 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 314, + 399, + 553, + 471 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 314, + 399, + 553, + 471 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.999" + }, + { + "bbox": [ + 314, + 399, + 553, + 471 + ], + "type": "text", + "content": ") with L1 loss, initial learning rate " + }, + { + "bbox": [ + 314, + 399, + 553, + 471 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 314, + 399, + 553, + 471 + ], + "type": "text", + "content": " (halved every 200 epochs), batch size 64 over 1,000 epochs (34 hours on " + }, + { + "bbox": [ + 314, + 399, + 553, + 471 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 314, + 399, + 553, + 471 + ], + "type": "text", + "content": " NVIDIA A6000)." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 472, + 553, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 472, + 553, + 519 + ], + "spans": [ + { + "bbox": [ + 314, + 472, + 553, + 519 + ], + "type": "text", + "content": "- Stage II - Refinement: Initialized with Stage I weights, fine-tuned using DIV2K and LSDIR subset. Configuration: L2 loss with cosine learning schedule (" + }, + { + "bbox": [ + 314, + 472, + 553, + 519 + ], + "type": "inline_equation", + "content": "\\eta_{\\mathrm{initial}} = 1 \\times 10^{-4}" + }, + { + "bbox": [ + 314, + 472, + 553, + 519 + ], + "type": "text", + "content": "), 500 epochs." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 521, + 553, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 521, + 553, + 556 + ], + "spans": [ + { + "bbox": [ + 313, + 521, + 553, + 556 + ], + "type": "text", + "content": "Other details: Training employed standard data augmentation (random rotation and flipping) without additional regularization techniques." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 564, + 390, + 577 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 564, + 390, + 577 + ], + "spans": [ + { + "bbox": [ + 313, + 564, + 390, + 577 + ], + "type": "text", + "content": "4.36. DepthIBN" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 582, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 582, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 582, + 555, + 713 + ], + "type": "text", + "content": "Single Image Super-Resolution (SISR) still faces challenges such as a large number of parameters, high memory consumption, and slow training and inference speed, despite significant advancements. These issues limit the practical use of SISR methods in real-world scenarios. 
Therefore, recent research has focused on developing lightweight models and optimizing network architectures. Among these techniques, Information Distillation is used to extract important features by splitting channels [43, 45, 67, 71]. One of the main challenges of CNNs is the high computational cost of convolution operations. To reduce this cost," + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "36" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 70, + 289, + 223 + ], + "blocks": [ + { + "bbox": [ + 57, + 70, + 289, + 223 + ], + "lines": [ + { + "bbox": [ + 57, + 70, + 289, + 223 + ], + "spans": [ + { + "bbox": [ + 57, + 70, + 289, + 223 + ], + "type": "image", + "image_path": "2c3df1452537de9ebf29c6ff074f05d09538ce0acd4db76b079e71e1940d1cb2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 235, + 295, + 257 + ], + "lines": [ + { + "bbox": [ + 55, + 235, + 295, + 257 + ], + "spans": [ + { + "bbox": [ + 55, + 235, + 295, + 257 + ], + "type": "text", + "content": "Figure 39. Team DepthIBN: Involution and BSConv Multi-Depth Distillation Block (IBMDB)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 281, + 295, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 281, + 295, + 435 + ], + "spans": [ + { + "bbox": [ + 54, + 281, + 295, + 435 + ], + "type": "text", + "content": "the Depthwise Separable Convolution (DSConv) [40, 135] method was introduced, but due to the separate processing of channels, some information may be lost. 
To address this issue, BSCov optimizes feature processing by utilizing kernel correlations, improving performance and reducing computations [34]. Furthermore, shown in Fig. 39, Involution replaces fixed filters with pixel-dependent dynamic filters, making it more sensitive to spatial variations and better at capturing long-range dependencies between pixels [60]. Involution not only reduces parameters and resource consumption but also provides better performance compared to convolution-based models due to its superior feature extraction capability." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 437, + 295, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 437, + 295, + 616 + ], + "spans": [ + { + "bbox": [ + 55, + 437, + 295, + 616 + ], + "type": "text", + "content": "Method. They used the IBMDN model in this challenge, following previous studies in the field of Lightweight Image Super-Resolution [6]. They propose an Involution and BSConv Multi-Depth Distillation Network (IBMDN), consisting of 6 Involution and BSConv Multi-Depth Distillation Blocks (IBMDB). IBMDB integrates Involution and BSConv to balance computational efficiency and feature extraction. The overall architecture of their proposed model consists of four main sections: shallow feature extraction, deep feature extraction, feature fusion, and reconstruction. A " + }, + { + "bbox": [ + 55, + 437, + 295, + 616 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 437, + 295, + 616 + ], + "type": "text", + "content": " convolution is used to extract shallow features. 
Then, through 6 IBMDB blocks, deep features are extracted and fused using a " + }, + { + "bbox": [ + 55, + 437, + 295, + 616 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 55, + 437, + 295, + 616 + ], + "type": "text", + "content": " convolution, followed by refinement through a " + }, + { + "bbox": [ + 55, + 437, + 295, + 616 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 437, + 295, + 616 + ], + "type": "text", + "content": " convolution. The pixel-shuffle operation is then used as the reconstruction module." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 617, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 295, + 713 + ], + "type": "text", + "content": "The Involution and BSConv Multi-Depth Distillation Block (IBMDB) consists of three shallow residual blocks (SRB_IBMD) and one channel contrast attention (CCA) block. Based on previous experiments, the use of " + }, + { + "bbox": [ + 55, + 617, + 295, + 713 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 55, + 617, + 295, + 713 + ], + "type": "text", + "content": " convolutions, due to computational complexity and a large number of parameters, is not always the best option, especially for lightweight super-resolution models [5]. In SISR models, a fixed structure for feature extraction blocks is" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "content": "usually used, while features extracted at different depths of the network may differ. This approach may prevent the model from fully exploiting its capacity. 
Designing blocks with varying structures tailored to the depth of the network can enhance model performance. In their proposed model, the block structure is adjusted based on network depth to achieve an optimal feature extraction combination at different levels." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 168, + 555, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 168, + 555, + 336 + ], + "spans": [ + { + "bbox": [ + 313, + 168, + 555, + 336 + ], + "type": "text", + "content": "BSCnv reduces parameters using intra-kernel correlation, better preserves information, and improves model accuracy without increasing complexity. Involution, with fewer learning parameters, extracts visual features through its attention mechanism and increases efficiency. Therefore, in the Information distillation structure, they consider the block structure differently. At the beginning of the network, BSCnv is dominant in maintaining pixel correlation and local interactions within the block, and with increasing depth, Involution becomes the dominant operator. If BSCnv is denoted by B and Involution by I, the optimal block combination in the deep feature extraction section is as follows: BBB-BBB-BIB-BIB-IBI-IBI. The details of the blocks are shown in the Fig. 39." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 346, + 389, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 346, + 389, + 357 + ], + "spans": [ + { + "bbox": [ + 313, + 346, + 389, + 357 + ], + "type": "text", + "content": "4.37. 
Cidaut AI" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 364, + 554, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 364, + 554, + 423 + ], + "spans": [ + { + "bbox": [ + 313, + 364, + 554, + 423 + ], + "type": "text", + "content": "They propose a lightweight yet effective network with three blocks: an initial Sobel-based block and two ESA-based edge refinement blocks, regulated by a global residual connection. Upscaling is performed via pixel shuffle for efficient super-resolution." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 318, + 438, + 552, + 594 + ], + "blocks": [ + { + "bbox": [ + 318, + 438, + 552, + 594 + ], + "lines": [ + { + "bbox": [ + 318, + 438, + 552, + 594 + ], + "spans": [ + { + "bbox": [ + 318, + 438, + 552, + 594 + ], + "type": "image", + "image_path": "0efe7737ced44ffc023622d39d82998bc7cf70f73b427d5f237ad9c301089f96.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 605, + 555, + 639 + ], + "lines": [ + { + "bbox": [ + 313, + 605, + 555, + 639 + ], + "spans": [ + { + "bbox": [ + 313, + 605, + 555, + 639 + ], + "type": "text", + "content": "Figure 40. Team Cidaut AI: Fused Edge Attention Network (FEAN) structure. They also show the Sobel Fused Residual Block (SFRB) and the Inverted Residual Bottlenecks (IRB) [86]." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 654, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 554, + 713 + ], + "type": "text", + "content": "As shown in Fig. 40, the design integrates two MobileNet Inverted Bottlenecks [86] with channel shuffle and SiLU activation for enhanced information mixing. 
Inspired by EFDN [117], Sobel-based attention extracts edge features, refined using partial convolutions [84] with minimal" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 72, + 291, + 184 + ], + "blocks": [ + { + "bbox": [ + 62, + 72, + 291, + 184 + ], + "lines": [ + { + "bbox": [ + 62, + 72, + 291, + 184 + ], + "spans": [ + { + "bbox": [ + 62, + 72, + 291, + 184 + ], + "type": "image", + "image_path": "e80fade4787f96534f1e2ef24e32d09dadef36238ccf578c2e38675c21631355.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 197, + 295, + 220 + ], + "lines": [ + { + "bbox": [ + 55, + 197, + 295, + 220 + ], + "spans": [ + { + "bbox": [ + 55, + 197, + 295, + 220 + ], + "type": "text", + "content": "Figure 41. Team Cidaut AI: Structure of the Enhanced ESA Block (EEB)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 241, + 296, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 241, + 296, + 301 + ], + "spans": [ + { + "bbox": [ + 55, + 241, + 296, + 301 + ], + "type": "text", + "content": "parameter increase. 
The final attention map, a weighted sum of refined " + }, + { + "bbox": [ + 55, + 241, + 296, + 301 + ], + "type": "inline_equation", + "content": "Gx" + }, + { + "bbox": [ + 55, + 241, + 296, + 301 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 241, + 296, + 301 + ], + "type": "inline_equation", + "content": "Gy" + }, + { + "bbox": [ + 55, + 241, + 296, + 301 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 55, + 241, + 296, + 301 + ], + "type": "inline_equation", + "content": "GxGy" + }, + { + "bbox": [ + 55, + 241, + 296, + 301 + ], + "type": "text", + "content": ", undergoes further refinement via partial convolution. A final " + }, + { + "bbox": [ + 55, + 241, + 296, + 301 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 55, + 241, + 296, + 301 + ], + "type": "text", + "content": " convolution preserves details while preventing excessive edge processing." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 301, + 296, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 301, + 296, + 409 + ], + "spans": [ + { + "bbox": [ + 55, + 301, + 296, + 409 + ], + "type": "text", + "content": "The proposed ERIB block, an efficient convolutional unit with self-activation, starts with depthwise convolution and " + }, + { + "bbox": [ + 55, + 301, + 296, + 409 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 55, + 301, + 296, + 409 + ], + "type": "text", + "content": " feature expansion [86]. Partial convolutions [84] refine features, while channel shuffle enhances mixing. Inspired by Simple Gate [10], they introduce nonlinearity by reducing channels without increasing parameters. A weighted residual connection with partial convolution ensures effective information propagation, maintaining competitive performance despite PyTorch inefficiencies." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 410, + 296, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 410, + 296, + 470 + ], + "spans": [ + { + "bbox": [ + 55, + 410, + 296, + 470 + ], + "type": "text", + "content": "For the EEB in Fig. 41, they draw inspiration from the ReNRB block [91], replacing reparameterized convolutions with ERIB for improved efficiency. Partial convolutions in the ESA bottleneck and residual connections further exploit feature map redundancy." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 470, + 296, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 470, + 296, + 578 + ], + "spans": [ + { + "bbox": [ + 55, + 470, + 296, + 578 + ], + "type": "text", + "content": "Training Strategy. The training was carried out using the DIV2K, FLICK2R, and LSIDR (30%) datasets to improve the model's generalization ability. As a baseline, the model was trained for 1000 epochs with a cosine annealing learning rate scheduler, a crop size of " + }, + { + "bbox": [ + 55, + 470, + 296, + 578 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 55, + 470, + 296, + 578 + ], + "type": "text", + "content": ", and a batch size of 16. Due to instability in the loss during training, an optimal learning rate analysis was performed whenever the loss diverged. This led to the implementation of a learning rate sweep strategy, which was organized into 5 stages." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 586, + 104, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 586, + 104, + 598 + ], + "spans": [ + { + "bbox": [ + 55, + 586, + 104, + 598 + ], + "type": "text", + "content": "4.38.IVL" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 605, + 295, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 605, + 295, + 665 + ], + "spans": [ + { + "bbox": [ + 55, + 605, + 295, + 665 + ], + "type": "text", + "content": "Method. Their approach builds upon the strategy used in SPAN [108], last year's winning method, to extract attention maps and integrates it into the proposed baseline architecture, EFDN [116], aiming to enhance feature extraction and structural representation in image processing tasks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 665, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 296, + 713 + ], + "type": "text", + "content": "Specifically, as illustrated in Figure 42, this strategy is incorporated within the EDBB blocks of EFDN, which are designed to capture fundamental structural features of an image by applying Sobel and Laplacian filters. These fil" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 72, + 554, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 554, + 133 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 554, + 133 + ], + "type": "text", + "content": "ters emphasize edge and texture information, contributing to improved representation learning. During the inference phase, the EDBB blocks are reparametrized into 3x3 convolutions to maintain computational efficiency while preserving learned feature representations." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 142, + 554, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 142, + 554, + 226 + ], + "spans": [ + { + "bbox": [ + 313, + 142, + 554, + 226 + ], + "type": "text", + "content": "The attention maps are derived following the approach implemented in SPAN, leveraging an activation function that is both odd and symmetric to effectively highlight essential regions of the image. These attention maps serve as a direct substitute for the ESA block present in the original EFDN model, aiming to refine feature selection and enhance the model's overall performance." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 236, + 554, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 236, + 554, + 284 + ], + "spans": [ + { + "bbox": [ + 313, + 236, + 554, + 284 + ], + "type": "text", + "content": "As a result of the applied modifications, the final architecture has a lower parameter count and requires fewer floating-point operations compared to the proposed baseline method, EFDN." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 294, + 554, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 294, + 554, + 331 + ], + "spans": [ + { + "bbox": [ + 313, + 294, + 554, + 331 + ], + "type": "text", + "content": "Training Details. 
The training process is structured into three progressive phases to optimize performance and stability:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 340, + 555, + 674 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 314, + 340, + 555, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 340, + 555, + 460 + ], + "spans": [ + { + "bbox": [ + 314, + 340, + 555, + 460 + ], + "type": "text", + "content": "- Pre-training: The model undergoes an initial training phase using the DIV2K dataset, incorporating data augmentation techniques such as random rotations, horizontal flipping, and random cropping to generate patches of size " + }, + { + "bbox": [ + 314, + 340, + 555, + 460 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 314, + 340, + 555, + 460 + ], + "type": "text", + "content": ". Training is conducted over 30,000 iterations with a batch size of 32, utilizing the Adam optimizer " + }, + { + "bbox": [ + 314, + 340, + 555, + 460 + ], + "type": "inline_equation", + "content": "(\\beta_{1} = 0.9, \\beta_{2} = 0.999)" + }, + { + "bbox": [ + 314, + 340, + 555, + 460 + ], + "type": "text", + "content": ". The learning rate is initially set to 1e-3 for the first 20,000 iterations and subsequently reduced to 1e-4 for the remaining 10,000 iterations. L1 loss is used throughout this phase." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 460, + 554, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 460, + 554, + 567 + ], + "spans": [ + { + "bbox": [ + 314, + 460, + 554, + 567 + ], + "type": "text", + "content": "- First training stage: The model is further refined using the DIV2K_LSDIR dataset, while maintaining the same augmentation strategies as in the pre-training phase. 
The patch size is increased to " + }, + { + "bbox": [ + 314, + 460, + 554, + 567 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 314, + 460, + 554, + 567 + ], + "type": "text", + "content": ", and training is extended to 100,000 iterations with a batch size of 64. The Adam optimizer " + }, + { + "bbox": [ + 314, + 460, + 554, + 567 + ], + "type": "inline_equation", + "content": "(\\beta_{1} = 0.9, \\beta_{2} = 0.999)" + }, + { + "bbox": [ + 314, + 460, + 554, + 567 + ], + "type": "text", + "content": " is employed, starting with a learning rate of 5e-4, which undergoes a decay by a factor of 0.5 every 20,000 iterations. L1 loss remains the chosen loss function for this stage." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 568, + 554, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 568, + 554, + 674 + ], + "spans": [ + { + "bbox": [ + 314, + 568, + 554, + 674 + ], + "type": "text", + "content": "- Second training stage: In the final phase, training continues on the DIV2K_LSDIR dataset with an expanded patch size of " + }, + { + "bbox": [ + 314, + 568, + 554, + 674 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 314, + 568, + 554, + 674 + ], + "type": "text", + "content": " for an additional 40,000 iterations. The same augmentation methods are retained, and most hyperparameters remain unchanged. However, to ensure stable convergence and fine-tune performance, the learning rate is reduced to 5e-5. During this stage, L1 loss is applied for the first 10,000 iterations, after which L2 loss is utilized to enhance final model performance." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 322, + 675, + 554, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 675, + 554, + 712 + ], + "spans": [ + { + "bbox": [ + 322, + 675, + 554, + 712 + ], + "type": "text", + "content": "All the training phases were performed of the model a single NVIDIA RTX 4070 Super GPU and required approximately 20 hours." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "text", + "content": "38" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 37 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 70, + 558, + 250 + ], + "blocks": [ + { + "bbox": [ + 55, + 70, + 558, + 250 + ], + "lines": [ + { + "bbox": [ + 55, + 70, + 558, + 250 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 558, + 250 + ], + "type": "image", + "image_path": "a34fd9595397439c984a401aa9617a0634a85b8e638fa7ea12403f01e0a3c2f6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 201, + 256, + 408, + 269 + ], + "lines": [ + { + "bbox": [ + 201, + 256, + 408, + 269 + ], + "spans": [ + { + "bbox": [ + 201, + 256, + 408, + 269 + ], + "type": "text", + "content": "Figure 42. Team IVL: Schematic diagram of the method." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 288, + 153, + 301 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 288, + 153, + 301 + ], + "spans": [ + { + "bbox": [ + 56, + 288, + 153, + 301 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 308, + 296, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 308, + 296, + 380 + ], + "spans": [ + { + "bbox": [ + 55, + 308, + 296, + 380 + ], + "type": "text", + "content": "This work was partially supported by the Humboldt Foundation, the Ministry of Education and Science of Bulgaria (support for INSAIT, part of the Bulgarian National Roadmap for Research Infrastructure). We thank the NTIRE 2025 sponsors: ByteDance, Meituan, Kuaishou, and University of Wurzburg (Computer Vision Lab)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 389, + 188, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 389, + 188, + 402 + ], + "spans": [ + { + "bbox": [ + 55, + 389, + 188, + 402 + ], + "type": "text", + "content": "A. 
Teams and Affiliations" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 408, + 175, + 420 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 408, + 175, + 420 + ], + "spans": [ + { + "bbox": [ + 55, + 408, + 175, + 420 + ], + "type": "text", + "content": "NTIRE 2025 ESR Teams" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 426, + 290, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 426, + 290, + 438 + ], + "spans": [ + { + "bbox": [ + 55, + 426, + 290, + 438 + ], + "type": "text", + "content": "Title: NTIRE 2025 Efficient Super-Resolution Challenge" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 439, + 100, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 439, + 100, + 449 + ], + "spans": [ + { + "bbox": [ + 55, + 439, + 100, + 449 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 449, + 186, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 449, + 186, + 461 + ], + "spans": [ + { + "bbox": [ + 55, + 449, + 186, + 461 + ], + "type": "text", + "content": "Bin Ren" + }, + { + "bbox": [ + 55, + 449, + 186, + 461 + ], + "type": "inline_equation", + "content": "^{1,2,4}" + }, + { + "bbox": [ + 55, + 449, + 186, + 461 + ], + "type": "text", + "content": " (bin. 
ren@unitn.it)," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 462, + 195, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 462, + 195, + 474 + ], + "spans": [ + { + "bbox": [ + 55, + 462, + 195, + 474 + ], + "type": "text", + "content": "Hang Guo" + }, + { + "bbox": [ + 55, + 462, + 195, + 474 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 55, + 462, + 195, + 474 + ], + "type": "text", + "content": " (cshguo@gmail.com)," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 474, + 170, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 474, + 170, + 486 + ], + "spans": [ + { + "bbox": [ + 55, + 474, + 170, + 486 + ], + "type": "text", + "content": "Lei Sun4 (lei.sun@insait.ai)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 486, + 254, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 486, + 254, + 498 + ], + "spans": [ + { + "bbox": [ + 55, + 486, + 254, + 498 + ], + "type": "text", + "content": "Zongwei Wu5 (zongwei.wu@uni-wuerzburg.de)," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 498, + 252, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 498, + 252, + 510 + ], + "spans": [ + { + "bbox": [ + 55, + 498, + 252, + 510 + ], + "type": "text", + "content": "Radu Timofte" + }, + { + "bbox": [ + 55, + 498, + 252, + 510 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 55, + 498, + 252, + 510 + ], + "type": "text", + "content": " (radu.timofte@vision.ee.ethz.ch)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 510, + 204, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 510, + 204, + 522 + ], + "spans": [ + { + "bbox": [ + 55, + 510, + 204, + 522 + ], + "type": "text", + "content": "Yawei " + }, + { + "bbox": [ + 55, + 510, + 204, + 522 + ], + "type": "inline_equation", + "content": "\\mathrm{Li^{6}}" + }, 
+ { + "bbox": [ + 55, + 510, + 204, + 522 + ], + "type": "text", + "content": " (li.yawei.ai@gmail.com)," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 522, + 107, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 522, + 107, + 533 + ], + "spans": [ + { + "bbox": [ + 55, + 522, + 107, + 533 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 534, + 294, + 604 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 57, + 534, + 160, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 534, + 160, + 545 + ], + "spans": [ + { + "bbox": [ + 57, + 534, + 160, + 545 + ], + "type": "text", + "content": "1 University of Pisa, Italy" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 546, + 170, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 546, + 170, + 557 + ], + "spans": [ + { + "bbox": [ + 56, + 546, + 170, + 557 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 56, + 546, + 170, + 557 + ], + "type": "text", + "content": " University of Trento, Italy" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 558, + 175, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 558, + 175, + 569 + ], + "spans": [ + { + "bbox": [ + 56, + 558, + 175, + 569 + ], + "type": "text", + "content": "3 Tsinghua University, China" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 56, + 569, + 294, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 569, + 294, + 582 + ], + "spans": [ + { + "bbox": [ + 56, + 569, + 294, + 582 + ], + "type": "text", + "content": "4 INSÄIT, Sofia University,\"St. 
Kliment Ohridski\", Bulgaria" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 56, + 582, + 294, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 582, + 294, + 594 + ], + "spans": [ + { + "bbox": [ + 56, + 582, + 294, + 594 + ], + "type": "text", + "content": "5 Computer Vision Lab, University of Würzburg, Germany" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 56, + 594, + 167, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 594, + 167, + 604 + ], + "spans": [ + { + "bbox": [ + 56, + 594, + 167, + 604 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 56, + 594, + 167, + 604 + ], + "type": "text", + "content": " ETH Zürich, Switzerland" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 56, + 624, + 90, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 624, + 90, + 635 + ], + "spans": [ + { + "bbox": [ + 56, + 624, + 90, + 635 + ], + "type": "text", + "content": "EMSR" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 55, + 641, + 295, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 641, + 295, + 653 + ], + "spans": [ + { + "bbox": [ + 55, + 641, + 295, + 653 + ], + "type": "text", + "content": "Title: Distillation-Supervised Convolutional Low-Rank" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 55, + 653, + 253, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 653, + 253, + 666 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 253, + 666 + ], + "type": "text", + "content": "Adaptation for Efficient Image Super-Resolution" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 56, + 666, + 100, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 666, + 100, + 676 + ], + "spans": [ + { + "bbox": [ + 56, + 666, + 100, + 676 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 25 + }, 
+ { + "bbox": [ + 55, + 677, + 214, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 214, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 214, + 689 + ], + "type": "text", + "content": "Yao Zhang " + }, + { + "bbox": [ + 55, + 677, + 214, + 689 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 677, + 214, + 689 + ], + "type": "text", + "content": " (yao_zhang@sjtu.edu.cn)," + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 55, + 689, + 230, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 230, + 701 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 230, + 701 + ], + "type": "text", + "content": "Xinning Chai1 (chaixinning@sjtu.edu.cn)," + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 55, + 701, + 231, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 701, + 231, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 701, + 231, + 713 + ], + "type": "text", + "content": "Zhengxue Cheng1 (zxcheng@sjtu.edu.cn)," + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 314, + 289, + 517, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 289, + 517, + 300 + ], + "spans": [ + { + "bbox": [ + 314, + 289, + 517, + 300 + ], + "type": "text", + "content": "Yingsheng Qin " + }, + { + "bbox": [ + 314, + 289, + 517, + 300 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 314, + 289, + 517, + 300 + ], + "type": "text", + "content": " (yingsheng.qin@transsion.com)," + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 315, + 300, + 490, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 300, + 490, + 312 + ], + "spans": [ + { + "bbox": [ + 315, + 300, + 490, + 312 + ], + "type": "text", + "content": "Yucai Yang " + }, + { + "bbox": [ + 315, + 300, + 490, + 312 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 300, + 
490, + 312 + ], + "type": "text", + "content": " (yucai.yang@transsion.com)," + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 315, + 312, + 447, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 312, + 447, + 325 + ], + "spans": [ + { + "bbox": [ + 315, + 312, + 447, + 325 + ], + "type": "text", + "content": "Li Song " + }, + { + "bbox": [ + 315, + 312, + 447, + 325 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 312, + 447, + 325 + ], + "type": "text", + "content": " (song_li@sjtu.edu.cn)," + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 315, + 325, + 366, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 325, + 366, + 335 + ], + "spans": [ + { + "bbox": [ + 315, + 325, + 366, + 335 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 315, + 336, + 446, + 359 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 315, + 336, + 446, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 336, + 446, + 349 + ], + "spans": [ + { + "bbox": [ + 315, + 336, + 446, + 349 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 336, + 446, + 349 + ], + "type": "text", + "content": " Shanghai Jiao Tong University" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 315, + 349, + 400, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 349, + 400, + 359 + ], + "spans": [ + { + "bbox": [ + 315, + 349, + 400, + 359 + ], + "type": "text", + "content": "2 Transsion in China" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 381, + 373, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 381, + 373, + 392 + ], + "spans": [ + { + "bbox": [ + 315, + 381, + 373, + 392 + ], + "type": "text", + "content": "XiaomiMM" + } + ] + } + ], + "index": 
36 + }, + { + "bbox": [ + 315, + 399, + 376, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 399, + 376, + 410 + ], + "spans": [ + { + "bbox": [ + 315, + 399, + 376, + 410 + ], + "type": "text", + "content": "Title: SPANF" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 315, + 411, + 358, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 411, + 358, + 422 + ], + "spans": [ + { + "bbox": [ + 315, + 411, + 358, + 422 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 315, + 422, + 494, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 422, + 494, + 435 + ], + "spans": [ + { + "bbox": [ + 315, + 422, + 494, + 435 + ], + "type": "text", + "content": "Hongyuan " + }, + { + "bbox": [ + 315, + 422, + 494, + 435 + ], + "type": "inline_equation", + "content": "\\mathrm{Yu}^1" + }, + { + "bbox": [ + 315, + 422, + 494, + 435 + ], + "type": "text", + "content": " (yuhyuan1995@gmail.com)," + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 315, + 435, + 490, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 435, + 490, + 447 + ], + "spans": [ + { + "bbox": [ + 315, + 435, + 490, + 447 + ], + "type": "text", + "content": "Pufan " + }, + { + "bbox": [ + 315, + 435, + 490, + 447 + ], + "type": "inline_equation", + "content": "\\mathrm{Xu}^2" + }, + { + "bbox": [ + 315, + 435, + 490, + 447 + ], + "type": "text", + "content": " (xpf22@mails.tsinghua.edu.cn)," + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 315, + 447, + 476, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 447, + 476, + 459 + ], + "spans": [ + { + "bbox": [ + 315, + 447, + 476, + 459 + ], + "type": "text", + "content": "Cheng Wan3 (jouiney666@gmail.com)," + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 315, + 459, + 503, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 
+ 315, + 459, + 503, + 471 + ], + "spans": [ + { + "bbox": [ + 315, + 459, + 503, + 471 + ], + "type": "text", + "content": "Zhijuan Huang1 (huangzhijuan@xiaomi.com)," + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 315, + 471, + 471, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 471, + 471, + 483 + ], + "spans": [ + { + "bbox": [ + 315, + 471, + 471, + 483 + ], + "type": "text", + "content": "Peng Guo" + }, + { + "bbox": [ + 315, + 471, + 471, + 483 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 315, + 471, + 471, + 483 + ], + "type": "text", + "content": " (guopeng0100@163.com)," + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 315, + 483, + 481, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 483, + 481, + 495 + ], + "spans": [ + { + "bbox": [ + 315, + 483, + 481, + 495 + ], + "type": "text", + "content": "Shuyuan Cui5 (jouiney666@gmail.com)," + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 315, + 495, + 460, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 495, + 460, + 506 + ], + "spans": [ + { + "bbox": [ + 315, + 495, + 460, + 506 + ], + "type": "text", + "content": "Chenjun Li" + }, + { + "bbox": [ + 315, + 495, + 460, + 506 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 315, + 495, + 460, + 506 + ], + "type": "text", + "content": " (cl2733@cornell.edu)," + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 315, + 506, + 477, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 506, + 477, + 519 + ], + "spans": [ + { + "bbox": [ + 315, + 506, + 477, + 519 + ], + "type": "text", + "content": "Xuehai Hu (hsquare@mail.ustc.edu.cn)," + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 315, + 519, + 451, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 519, + 451, + 531 + ], + "spans": [ + { + "bbox": [ + 315, + 519, + 451, + 
531 + ], + "type": "text", + "content": "Pan Pan1 (panpan@xiaomi.com)," + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 315, + 531, + 479, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 531, + 479, + 543 + ], + "spans": [ + { + "bbox": [ + 315, + 531, + 479, + 543 + ], + "type": "text", + "content": "Xin Zhang" + }, + { + "bbox": [ + 315, + 531, + 479, + 543 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 531, + 479, + 543 + ], + "type": "text", + "content": " (zhangxin14@xiaomi.com)," + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 315, + 543, + 487, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 543, + 487, + 555 + ], + "spans": [ + { + "bbox": [ + 315, + 543, + 487, + 555 + ], + "type": "text", + "content": "Heng Zhang" + }, + { + "bbox": [ + 315, + 543, + 487, + 555 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 543, + 487, + 555 + ], + "type": "text", + "content": " (zhangheng8@xiaomi.com)," + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 315, + 567, + 365, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 567, + 365, + 578 + ], + "spans": [ + { + "bbox": [ + 315, + 567, + 365, + 578 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 315, + 578, + 536, + 639 + ], + "type": "list", + "angle": 0, + "index": 56, + "blocks": [ + { + "bbox": [ + 315, + 578, + 473, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 578, + 473, + 590 + ], + "spans": [ + { + "bbox": [ + 315, + 578, + 473, + 590 + ], + "type": "text", + "content": "1 Multimedia Department, Xiaomi Inc." 
+ } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 315, + 590, + 526, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 590, + 526, + 603 + ], + "spans": [ + { + "bbox": [ + 315, + 590, + 526, + 603 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 590, + 526, + 603 + ], + "type": "text", + "content": " School of Integrated Circuits, Tsinghua University" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 315, + 603, + 399, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 603, + 399, + 614 + ], + "spans": [ + { + "bbox": [ + 315, + 603, + 399, + 614 + ], + "type": "text", + "content": "3 Cornell University" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 315, + 614, + 536, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 614, + 536, + 626 + ], + "spans": [ + { + "bbox": [ + 315, + 614, + 536, + 626 + ], + "type": "text", + "content": "4 Hanhai Information Technology (Shanghai) Co., Ltd." + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 315, + 626, + 457, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 626, + 457, + 639 + ], + "spans": [ + { + "bbox": [ + 315, + 626, + 457, + 639 + ], + "type": "text", + "content": "5 Huatai Insurance Group Co., Ltd." 
+ } + ] + } + ], + "index": 55 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 659, + 378, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 659, + 378, + 670 + ], + "spans": [ + { + "bbox": [ + 315, + 659, + 378, + 670 + ], + "type": "text", + "content": "ShannonLab" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 314, + 677, + 553, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 677, + 553, + 689 + ], + "spans": [ + { + "bbox": [ + 314, + 677, + 553, + 689 + ], + "type": "text", + "content": "Title: Reparameterization Network for Efficient Image" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 315, + 689, + 386, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 689, + 386, + 700 + ], + "spans": [ + { + "bbox": [ + 315, + 689, + 386, + 700 + ], + "type": "text", + "content": "Super-Resolution" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 315, + 701, + 358, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 701, + 358, + 712 + ], + "spans": [ + { + "bbox": [ + 315, + 701, + 358, + 712 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 60 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "39" + } + ] + } + ], + "index": 61 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 38 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 195, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 195, + 132 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 195, + 132 + ], + "type": "text", + "content": "Qing Luo" + }, + { + "bbox": [ + 55, + 72, + 195, + 132 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, 
+ 72, + 195, + 132 + ], + "type": "text", + "content": " (luoqing.94@qq.com), Linyan Jiang" + }, + { + "bbox": [ + 55, + 72, + 195, + 132 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 72, + 195, + 132 + ], + "type": "text", + "content": ", Haibo Lei" + }, + { + "bbox": [ + 55, + 72, + 195, + 132 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 72, + 195, + 132 + ], + "type": "text", + "content": ", Qifang Gao" + }, + { + "bbox": [ + 55, + 72, + 195, + 132 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 72, + 195, + 132 + ], + "type": "text", + "content": ", Yaqing Li" + }, + { + "bbox": [ + 55, + 72, + 195, + 132 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 72, + 195, + 132 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 144, + 107, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 144, + 107, + 167 + ], + "spans": [ + { + "bbox": [ + 55, + 144, + 107, + 167 + ], + "type": "text", + "content": "Affiliations: \n1Tencent" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 188, + 86, + 199 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 188, + 86, + 199 + ], + "spans": [ + { + "bbox": [ + 55, + 188, + 86, + 199 + ], + "type": "text", + "content": "TSSR" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 205, + 295, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 205, + 295, + 254 + ], + "spans": [ + { + "bbox": [ + 55, + 205, + 295, + 254 + ], + "type": "text", + "content": "Title: Light Network for Efficient Image Super-Resolution \nMembers: \nWeihua Luo1 (185471613@qq.com), \nTsing Li1," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 266, + 159, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 266, + 159, + 289 + ], + "spans": [ 
+ { + "bbox": [ + 55, + 266, + 159, + 289 + ], + "type": "text", + "content": "Affiliations: \n1 Independent researcher" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 310, + 85, + 322 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 310, + 85, + 322 + ], + "spans": [ + { + "bbox": [ + 55, + 310, + 85, + 322 + ], + "type": "text", + "content": "mbga" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "spans": [ + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "type": "text", + "content": "Title: Expanded SPAN for Efficient Super-Resolution Members: \nQing Wang" + }, + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "type": "text", + "content": " (wangqing.Keen@bytedance.com), \nYi Liu" + }, + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "type": "text", + "content": ", \nYang Wang" + }, + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "type": "text", + "content": ", \nHongyu An" + }, + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "type": "text", + "content": ", \nLiou Zhang" + }, + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "type": "text", + "content": ", \nShijie Zhao" + }, + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 327, + 276, + 423 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ 
+ 55, + 435, + 110, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 435, + 110, + 459 + ], + "spans": [ + { + "bbox": [ + 55, + 435, + 110, + 459 + ], + "type": "text", + "content": "Affiliations: \n1 ByteDance" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 479, + 101, + 490 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 479, + 101, + 490 + ], + "spans": [ + { + "bbox": [ + 55, + 479, + 101, + 490 + ], + "type": "text", + "content": "VPEG_C" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 496, + 295, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 496, + 295, + 592 + ], + "spans": [ + { + "bbox": [ + 55, + 496, + 295, + 592 + ], + "type": "text", + "content": "Title: DAN: Dual Attention Network for lightweight Image Super-Resolution \nMembers: \nLianhong Song1 (songlianhong@njust.edu.cn), \nLong Sun1, \nJinshan Pan1, \nJiangxin Dong1, \nJinhui Tang1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 605, + 253, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 605, + 253, + 628 + ], + "spans": [ + { + "bbox": [ + 55, + 605, + 253, + 628 + ], + "type": "text", + "content": "Affiliations: \n1Nanjing University of Science and Technology" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 647, + 111, + 660 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 647, + 111, + 660 + ], + "spans": [ + { + "bbox": [ + 55, + 647, + 111, + 660 + ], + "type": "text", + "content": "XUPTBoys" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 665, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 295, + 714 + ], + "type": "text", + "content": "Title: Frequency-Guided Multi-level Dispersion Network for Efficient Image Super-Resolution \nMembers: Jing Wei1 (freedomwj@126.com)," + } + ] + } 
+ ], + "index": 12 + }, + { + "bbox": [ + 314, + 72, + 392, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 392, + 118 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 392, + 118 + ], + "type": "text", + "content": "Mengyang Wang1, Ruilong Guo1, Qian Wang1,2, Affiliations:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 120, + 553, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 120, + 553, + 156 + ], + "spans": [ + { + "bbox": [ + 315, + 120, + 553, + 156 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 120, + 553, + 156 + ], + "type": "text", + "content": " Xi'an University of Posts and Telecommunications " + }, + { + "bbox": [ + 315, + 120, + 553, + 156 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 120, + 553, + 156 + ], + "type": "text", + "content": " National Engineering Laboratory for Cyber Event Warning and Control Technologies" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 173, + 369, + 185 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 173, + 369, + 185 + ], + "spans": [ + { + "bbox": [ + 315, + 173, + 369, + 185 + ], + "type": "text", + "content": "HannahSR" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 190, + 553, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 190, + 553, + 299 + ], + "spans": [ + { + "bbox": [ + 314, + 190, + 553, + 299 + ], + "type": "text", + "content": "Title: Multi-level Refinement and Bias-learnable Attention Dual Branch Network for Efficient Image Super-Resolution Members: Qingliang Liu" + }, + { + "bbox": [ + 314, + 190, + 553, + 299 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 314, + 190, + 553, + 299 + ], + "type": "text", + "content": " (liuqingliang1@honor.com), Yang Cheng" + }, + { + "bbox": [ + 314, + 190, + 553, + 299 + ], + "type": 
"inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 314, + 190, + 553, + 299 + ], + "type": "text", + "content": " (obliviate73@outlook.com) Affiliations: \n" + }, + { + "bbox": [ + 314, + 190, + 553, + 299 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 314, + 190, + 553, + 299 + ], + "type": "text", + "content": " Beijing Honor Device Co., Ltd. \n" + }, + { + "bbox": [ + 314, + 190, + 553, + 299 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 314, + 190, + 553, + 299 + ], + "type": "text", + "content": " State Key Laboratory of Integrated Chip & System, Fudan University" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 316, + 353, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 316, + 353, + 327 + ], + "spans": [ + { + "bbox": [ + 315, + 316, + 353, + 327 + ], + "type": "text", + "content": "Davinci" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 333, + 455, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 333, + 455, + 406 + ], + "spans": [ + { + "bbox": [ + 315, + 333, + 455, + 406 + ], + "type": "text", + "content": "Title: PlayerAug \nMembers: \nDavinci (1016994139@qq.com), \nEnxuan Gu1(guexstan@163.com), \nAffiliations: \n1 Dalian University of Technology" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 422, + 347, + 434 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 422, + 347, + 434 + ], + "spans": [ + { + "bbox": [ + 315, + 422, + 347, + 434 + ], + "type": "text", + "content": "SRCB" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 441, + 503, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 441, + 503, + 513 + ], + "spans": [ + { + "bbox": [ + 315, + 441, + 503, + 513 + ], + "type": "text", + "content": "Title: SPAN with pruning. 
\nMembers: \nDafeng Zhang1 (dfeng.zhang@samsung.com), Yang Yong1, \nAffiliations: \n1 Samsung Research China - Beijing (SRC-B)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 529, + 364, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 529, + 364, + 540 + ], + "spans": [ + { + "bbox": [ + 315, + 529, + 364, + 540 + ], + "type": "text", + "content": "Rochester" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 547, + 553, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 547, + 553, + 654 + ], + "spans": [ + { + "bbox": [ + 314, + 547, + 553, + 654 + ], + "type": "text", + "content": "Title: ESRNet: An enhanced version of SPAN for Efficient Super-Resolution \nMembers: \nPinxin Liu1 (pliu23@ur.rochester.edu), \nYongsheng Yu1 (yyu90@ur.rochester.edu), \nHang Hua1 (hhua2@cs.rochester.edu), \nYunlong Tang1 (yunlong.tang@rochester.edu), \nAffiliations: \n1 University of Rochester" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 672, + 343, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 672, + 343, + 683 + ], + "spans": [ + { + "bbox": [ + 315, + 672, + 343, + 683 + ], + "type": "text", + "content": "IESR" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 314, + 689, + 509, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 689, + 509, + 712 + ], + "spans": [ + { + "bbox": [ + 314, + 689, + 509, + 712 + ], + "type": "text", + "content": "Title: Inference Efficient Super-Rosolution Net Members:" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "40" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 39 + }, + { + 
"para_blocks": [ + { + "bbox": [ + 55, + 72, + 237, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 237, + 132 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 237, + 132 + ], + "type": "text", + "content": "Shihao Wang1 (shihao.wsh@antgroup.com), Yukun Yang1, Zhiyu Zhang1, Affiliations: \n1 Ant Group" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 156, + 81, + 167 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 156, + 81, + 167 + ], + "spans": [ + { + "bbox": [ + 56, + 156, + 81, + 167 + ], + "type": "text", + "content": "ASR" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 175, + 106, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 175, + 106, + 186 + ], + "spans": [ + { + "bbox": [ + 56, + 175, + 106, + 186 + ], + "type": "text", + "content": "Title: ASR" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 188, + 99, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 188, + 99, + 198 + ], + "spans": [ + { + "bbox": [ + 56, + 188, + 99, + 198 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 199, + 230, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 199, + 230, + 212 + ], + "spans": [ + { + "bbox": [ + 56, + 199, + 230, + 212 + ], + "type": "text", + "content": "Yukun Yang" + }, + { + "bbox": [ + 56, + 199, + 230, + 212 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 199, + 230, + 212 + ], + "type": "text", + "content": " (yukun.yyk@antgroup.com)," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 213, + 107, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 213, + 107, + 223 + ], + "spans": [ + { + "bbox": [ + 56, + 213, + 107, + 223 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 57, + 223, + 
87, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 223, + 87, + 234 + ], + "spans": [ + { + "bbox": [ + 57, + 223, + 87, + 234 + ], + "type": "text", + "content": "1 None" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 259, + 102, + 271 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 259, + 102, + 271 + ], + "spans": [ + { + "bbox": [ + 56, + 259, + 102, + 271 + ], + "type": "text", + "content": "VPEG_O" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 278, + 295, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 278, + 295, + 303 + ], + "spans": [ + { + "bbox": [ + 55, + 278, + 295, + 303 + ], + "type": "text", + "content": "Title: SAFMNv3: Simple Feature Modulation Network for Real-Time Image Super-Resolution" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 304, + 99, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 304, + 99, + 314 + ], + "spans": [ + { + "bbox": [ + 56, + 304, + 99, + 314 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 315, + 213, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 315, + 213, + 327 + ], + "spans": [ + { + "bbox": [ + 56, + 315, + 213, + 327 + ], + "type": "text", + "content": "Long Sun1 (cs.longsun@njust.edu.cn)," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 327, + 124, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 327, + 124, + 338 + ], + "spans": [ + { + "bbox": [ + 56, + 327, + 124, + 338 + ], + "type": "text", + "content": "Lianhong Son1," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 339, + 112, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 339, + 112, + 350 + ], + "spans": [ + { + "bbox": [ + 56, + 339, + 112, + 350 + ], + "type": "text", + "content": "Jinshan Pan1," + } + ] + } + ], + 
"index": 12 + }, + { + "bbox": [ + 56, + 351, + 124, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 351, + 124, + 363 + ], + "spans": [ + { + "bbox": [ + 56, + 351, + 124, + 363 + ], + "type": "text", + "content": "Jiangxin Dong1," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 363, + 108, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 363, + 108, + 374 + ], + "spans": [ + { + "bbox": [ + 56, + 363, + 108, + 374 + ], + "type": "text", + "content": "Jinhui Tang" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 376, + 107, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 376, + 107, + 386 + ], + "spans": [ + { + "bbox": [ + 56, + 376, + 107, + 386 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 57, + 386, + 252, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 386, + 252, + 399 + ], + "spans": [ + { + "bbox": [ + 57, + 386, + 252, + 399 + ], + "type": "text", + "content": "1 Nanjing University of Science and Technology" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 423, + 91, + 434 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 423, + 91, + 434 + ], + "spans": [ + { + "bbox": [ + 56, + 423, + 91, + 434 + ], + "type": "text", + "content": "mmSR" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 55, + 442, + 295, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 442, + 295, + 466 + ], + "spans": [ + { + "bbox": [ + 55, + 442, + 295, + 466 + ], + "type": "text", + "content": "Title: Efficient Feature Aggregation Network for Image Super-Resolution" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 56, + 467, + 99, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 467, + 99, + 477 + ], + "spans": [ + { + "bbox": [ + 56, + 467, + 99, + 477 + ], + "type": 
"text", + "content": "Members:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 56, + 479, + 182, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 479, + 182, + 491 + ], + "spans": [ + { + "bbox": [ + 56, + 479, + 182, + 491 + ], + "type": "text", + "content": "Jiyu " + }, + { + "bbox": [ + 56, + 479, + 182, + 491 + ], + "type": "inline_equation", + "content": "\\mathsf{W u}^1" + }, + { + "bbox": [ + 56, + 479, + 182, + 491 + ], + "type": "text", + "content": " (jiyu_wu@163.com)," + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 56, + 491, + 223, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 491, + 223, + 502 + ], + "spans": [ + { + "bbox": [ + 56, + 491, + 223, + 502 + ], + "type": "text", + "content": "Jiancheng Huang " + }, + { + "bbox": [ + 56, + 491, + 223, + 502 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 491, + 223, + 502 + ], + "type": "text", + "content": "(jc.huang@siat.ac.cn)," + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 56, + 502, + 106, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 502, + 106, + 513 + ], + "spans": [ + { + "bbox": [ + 56, + 502, + 106, + 513 + ], + "type": "text", + "content": "Yifan Liu1," + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 56, + 514, + 106, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 514, + 106, + 525 + ], + "spans": [ + { + "bbox": [ + 56, + 514, + 106, + 525 + ], + "type": "text", + "content": "Yi Huang " + }, + { + "bbox": [ + 56, + 514, + 106, + 525 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 514, + 106, + 525 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 56, + 525, + 121, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 525, + 121, + 538 + ], + "spans": [ + { + "bbox": [ + 56, + 525, + 121, + 538 + ], + 
"type": "text", + "content": "Shifeng Chen 1," + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 56, + 539, + 107, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 539, + 107, + 550 + ], + "spans": [ + { + "bbox": [ + 56, + 539, + 107, + 550 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 56, + 550, + 295, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 550, + 295, + 574 + ], + "spans": [ + { + "bbox": [ + 56, + 550, + 295, + 574 + ], + "type": "text", + "content": "1 Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 56, + 598, + 99, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 598, + 99, + 609 + ], + "spans": [ + { + "bbox": [ + 56, + 598, + 99, + 609 + ], + "type": "text", + "content": "ChanSR" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 55, + 617, + 295, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 295, + 642 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 295, + 642 + ], + "type": "text", + "content": "Title: EECNet: Edge Enhanced Convolutional Network for Efficient Super-Resolution" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 56, + 643, + 99, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 643, + 99, + 652 + ], + "spans": [ + { + "bbox": [ + 56, + 643, + 99, + 652 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 56, + 653, + 193, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 653, + 193, + 666 + ], + "spans": [ + { + "bbox": [ + 56, + 653, + 193, + 666 + ], + "type": "text", + "content": "Rui Chen1 (chenr269@163.com)," + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 56, + 666, + 107, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 56, + 666, + 107, + 677 + ], + "spans": [ + { + "bbox": [ + 56, + 666, + 107, + 677 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 56, + 677, + 295, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 677, + 295, + 701 + ], + "spans": [ + { + "bbox": [ + 56, + 677, + 295, + 701 + ], + "type": "text", + "content": "1 Shenzhen International Graduate School, Tsinghua University, China" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 315, + 72, + 396, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 72, + 396, + 83 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 396, + 83 + ], + "type": "text", + "content": "Pixel Alchemists" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 315, + 89, + 381, + 100 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 89, + 381, + 100 + ], + "spans": [ + { + "bbox": [ + 315, + 89, + 381, + 100 + ], + "type": "text", + "content": "Title: RCUNet" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 315, + 102, + 358, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 102, + 358, + 112 + ], + "spans": [ + { + "bbox": [ + 315, + 102, + 358, + 112 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 315, + 114, + 453, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 114, + 453, + 125 + ], + "spans": [ + { + "bbox": [ + 315, + 114, + 453, + 125 + ], + "type": "text", + "content": "Yi Feng" + }, + { + "bbox": [ + 315, + 114, + 453, + 125 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 114, + 453, + 125 + ], + "type": "text", + "content": " (fenyi_work@163.com)," + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 315, + 125, + 364, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 125, + 364, + 137 + ], + 
"spans": [ + { + "bbox": [ + 315, + 125, + 364, + 137 + ], + "type": "text", + "content": "Mingxi " + }, + { + "bbox": [ + 315, + 125, + 364, + 137 + ], + "type": "inline_equation", + "content": "\\mathrm{Li}^1" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 315, + 137, + 365, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 137, + 365, + 148 + ], + "spans": [ + { + "bbox": [ + 315, + 137, + 365, + 148 + ], + "type": "text", + "content": "Cailu Wan1," + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 315, + 149, + 370, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 149, + 370, + 161 + ], + "spans": [ + { + "bbox": [ + 315, + 149, + 370, + 161 + ], + "type": "text", + "content": "Xiangji " + }, + { + "bbox": [ + 315, + 149, + 370, + 161 + ], + "type": "inline_equation", + "content": "\\mathbf{W}\\mathbf{u}^{1}" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 315, + 174, + 365, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 174, + 365, + 184 + ], + "spans": [ + { + "bbox": [ + 315, + 174, + 365, + 184 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 315, + 185, + 417, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 185, + 417, + 196 + ], + "spans": [ + { + "bbox": [ + 315, + 185, + 417, + 196 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 185, + 417, + 196 + ], + "type": "text", + "content": " Independent researcher" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 315, + 216, + 332, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 216, + 332, + 227 + ], + "spans": [ + { + "bbox": [ + 315, + 216, + 332, + 227 + ], + "type": "text", + "content": "LZ" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 314, + 233, + 553, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": 
[ + 314, + 233, + 553, + 245 + ], + "spans": [ + { + "bbox": [ + 314, + 233, + 553, + 245 + ], + "type": "text", + "content": "Title: Tensor decompose efficient super-resolution network" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 315, + 246, + 358, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 246, + 358, + 256 + ], + "spans": [ + { + "bbox": [ + 315, + 246, + 358, + 256 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 315, + 257, + 460, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 257, + 460, + 269 + ], + "spans": [ + { + "bbox": [ + 315, + 257, + 460, + 269 + ], + "type": "text", + "content": "Zibin Liu1 (1451971605@qq.com)," + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 315, + 270, + 481, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 270, + 481, + 281 + ], + "spans": [ + { + "bbox": [ + 315, + 270, + 481, + 281 + ], + "type": "text", + "content": "Jinyang Zhong" + }, + { + "bbox": [ + 315, + 270, + 481, + 281 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 270, + 481, + 281 + ], + "type": "text", + "content": " (1439764064@qq.com)," + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 315, + 293, + 365, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 293, + 365, + 304 + ], + "spans": [ + { + "bbox": [ + 315, + 293, + 365, + 304 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 315, + 304, + 446, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 304, + 446, + 316 + ], + "spans": [ + { + "bbox": [ + 315, + 304, + 446, + 316 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 304, + 446, + 316 + ], + "type": "text", + "content": " Southwest Jiaotong University" + } + ] + } + ], + "index": 48 + }, + { + 
"bbox": [ + 315, + 317, + 400, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 317, + 400, + 328 + ], + "spans": [ + { + "bbox": [ + 315, + 317, + 400, + 328 + ], + "type": "text", + "content": "Sichuan University" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 315, + 348, + 331, + 359 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 348, + 331, + 359 + ], + "spans": [ + { + "bbox": [ + 315, + 348, + 331, + 359 + ], + "type": "text", + "content": "Z6" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 314, + 365, + 553, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 365, + 553, + 389 + ], + "spans": [ + { + "bbox": [ + 314, + 365, + 553, + 389 + ], + "type": "text", + "content": "Title: GLoReNet: Global and Local feature Refinement Network for Efficient Super-Resolution" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 315, + 390, + 358, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 390, + 358, + 399 + ], + "spans": [ + { + "bbox": [ + 315, + 390, + 358, + 399 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 315, + 401, + 479, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 401, + 479, + 413 + ], + "spans": [ + { + "bbox": [ + 315, + 401, + 479, + 413 + ], + "type": "text", + "content": "Kihwan Yoon" + }, + { + "bbox": [ + 315, + 401, + 479, + 413 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 401, + 479, + 413 + ], + "type": "text", + "content": " (rlghksdbs@gmail.com)," + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 315, + 414, + 410, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 414, + 410, + 426 + ], + "spans": [ + { + "bbox": [ + 315, + 414, + 410, + 426 + ], + "type": "text", + "content": "Ganzorig Gankhuyag1," + } + ] + } + ], + "index": 54 + }, + { + 
"bbox": [ + 315, + 426, + 365, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 426, + 365, + 437 + ], + "spans": [ + { + "bbox": [ + 315, + 426, + 365, + 437 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 315, + 437, + 512, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 437, + 512, + 449 + ], + "spans": [ + { + "bbox": [ + 315, + 437, + 512, + 449 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 437, + 512, + 449 + ], + "type": "text", + "content": " Korea Electronics Technology Institute (KETI)" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 315, + 468, + 365, + 479 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 468, + 365, + 479 + ], + "spans": [ + { + "bbox": [ + 315, + 468, + 365, + 479 + ], + "type": "text", + "content": "TACO_SR" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 315, + 486, + 396, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 486, + 396, + 496 + ], + "spans": [ + { + "bbox": [ + 315, + 486, + 396, + 496 + ], + "type": "text", + "content": "Title: TenInOneSR" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 315, + 498, + 358, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 498, + 358, + 508 + ], + "spans": [ + { + "bbox": [ + 315, + 498, + 358, + 508 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 315, + 510, + 535, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 510, + 535, + 521 + ], + "spans": [ + { + "bbox": [ + 315, + 510, + 535, + 521 + ], + "type": "text", + "content": "Shengyun Zhong" + }, + { + "bbox": [ + 315, + 510, + 535, + 521 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 510, + 535, + 521 + ], + "type": "text", + "content": " 
(shengyunzhong2002@gmail.com)," + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 315, + 521, + 476, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 521, + 476, + 533 + ], + "spans": [ + { + "bbox": [ + 315, + 521, + 476, + 533 + ], + "type": "text", + "content": "Mingyang " + }, + { + "bbox": [ + 315, + 521, + 476, + 533 + ], + "type": "inline_equation", + "content": "\\mathbf{W u}^{2}" + }, + { + "bbox": [ + 315, + 521, + 476, + 533 + ], + "type": "text", + "content": " (mingyang@tamu.edu)," + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 315, + 533, + 439, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 533, + 439, + 544 + ], + "spans": [ + { + "bbox": [ + 315, + 533, + 439, + 544 + ], + "type": "text", + "content": "Renjie " + }, + { + "bbox": [ + 315, + 533, + 439, + 544 + ], + "type": "inline_equation", + "content": "\\mathrm{Li}^2" + }, + { + "bbox": [ + 315, + 533, + 439, + 544 + ], + "type": "text", + "content": " renjie@tamu.edu)," + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 315, + 545, + 485, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 545, + 485, + 557 + ], + "spans": [ + { + "bbox": [ + 315, + 545, + 485, + 557 + ], + "type": "text", + "content": "Yushen Zuo" + }, + { + "bbox": [ + 315, + 545, + 485, + 557 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 315, + 545, + 485, + 557 + ], + "type": "text", + "content": " (zuoyushen12@gmail.com)," + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 315, + 558, + 454, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 558, + 454, + 569 + ], + "spans": [ + { + "bbox": [ + 315, + 558, + 454, + 569 + ], + "type": "text", + "content": "Zhengzhong " + }, + { + "bbox": [ + 315, + 558, + 454, + 569 + ], + "type": "inline_equation", + "content": "\\mathrm{Tu}^2" + }, + { + "bbox": [ + 315, + 558, + 454, + 569 + ], + "type": "text", + 
"content": " (tzz@tamu.edu)," + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 315, + 570, + 365, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 570, + 365, + 580 + ], + "spans": [ + { + "bbox": [ + 315, + 570, + 365, + 580 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 315, + 581, + 533, + 617 + ], + "type": "list", + "angle": 0, + "index": 69, + "blocks": [ + { + "bbox": [ + 315, + 581, + 444, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 581, + 444, + 593 + ], + "spans": [ + { + "bbox": [ + 315, + 581, + 444, + 593 + ], + "type": "text", + "content": "1 Northeastern University, USA" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 315, + 593, + 442, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 593, + 442, + 605 + ], + "spans": [ + { + "bbox": [ + 315, + 593, + 442, + 605 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 593, + 442, + 605 + ], + "type": "text", + "content": " Texas A&M University, USA" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 315, + 605, + 533, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 605, + 533, + 617 + ], + "spans": [ + { + "bbox": [ + 315, + 605, + 533, + 617 + ], + "type": "text", + "content": "3 The Hong Kong Polytechnic University, Hong Kong" + } + ] + } + ], + "index": 68 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 635, + 361, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 635, + 361, + 647 + ], + "spans": [ + { + "bbox": [ + 315, + 635, + 361, + 647 + ], + "type": "text", + "content": "AIOT.AI" + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 314, + 653, + 553, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 653, + 553, + 677 + ], + "spans": [ + { + "bbox": [ + 314, + 653, + 553, + 677 + ], + 
"type": "text", + "content": "Title: Efficient channel attention super-resolution network acting on space" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 315, + 678, + 358, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 678, + 358, + 688 + ], + "spans": [ + { + "bbox": [ + 315, + 678, + 358, + 688 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 315, + 689, + 481, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 689, + 481, + 701 + ], + "spans": [ + { + "bbox": [ + 315, + 689, + 481, + 701 + ], + "type": "text", + "content": "Zongang Gao " + }, + { + "bbox": [ + 315, + 689, + 481, + 701 + ], + "type": "inline_equation", + "content": "1^{1}" + }, + { + "bbox": [ + 315, + 689, + 481, + 701 + ], + "type": "text", + "content": " (gaozongang@qq.com)," + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 315, + 702, + 383, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 702, + 383, + 712 + ], + "spans": [ + { + "bbox": [ + 315, + 702, + 383, + 712 + ], + "type": "text", + "content": "Guannan Chen1," + } + ] + } + ], + "index": 74 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 742 + ], + "type": "text", + "content": "41" + } + ] + } + ], + "index": 75 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 40 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 107, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 107, + 83 + ], + "type": "text", + "content": "Yuan Tian1," + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 84, + 116, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 84, + 116, + 95 
+ ], + "spans": [ + { + "bbox": [ + 56, + 84, + 116, + 95 + ], + "type": "text", + "content": "Wenhui Chen" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 97, + 107, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 97, + 107, + 107 + ], + "spans": [ + { + "bbox": [ + 56, + 97, + 107, + 107 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 107, + 199, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 107, + 199, + 120 + ], + "spans": [ + { + "bbox": [ + 56, + 107, + 199, + 120 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 107, + 199, + 120 + ], + "type": "text", + "content": " BOE, AIOT CTO, Beijing, China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 140, + 97, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 140, + 97, + 152 + ], + "spans": [ + { + "bbox": [ + 56, + 140, + 97, + 152 + ], + "type": "text", + "content": "JNU620" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 157, + 295, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 157, + 295, + 181 + ], + "spans": [ + { + "bbox": [ + 56, + 157, + 295, + 181 + ], + "type": "text", + "content": "Title: Reparameterized Residual Local Feature Network for Efficient Image Super-Resolution" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 182, + 99, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 182, + 99, + 193 + ], + "spans": [ + { + "bbox": [ + 56, + 182, + 99, + 193 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 194, + 242, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 194, + 242, + 205 + ], + "spans": [ + { + "bbox": [ + 56, + 194, + 242, + 205 + ], + "type": "text", + "content": "Weijun Yuan" + }, + { + "bbox": [ + 56, + 
194, + 242, + 205 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 194, + 242, + 205 + ], + "type": "text", + "content": " (yweijun@stu2022.jnu.edu.cn)," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 206, + 96, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 206, + 96, + 217 + ], + "spans": [ + { + "bbox": [ + 56, + 206, + 96, + 217 + ], + "type": "text", + "content": "Zhan Li1," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 217, + 117, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 217, + 117, + 229 + ], + "spans": [ + { + "bbox": [ + 56, + 217, + 117, + 229 + ], + "type": "text", + "content": "Yihang Chen1," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 230, + 112, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 230, + 112, + 242 + ], + "spans": [ + { + "bbox": [ + 56, + 230, + 112, + 242 + ], + "type": "text", + "content": "Yifan Deng1," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 242, + 116, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 242, + 116, + 254 + ], + "spans": [ + { + "bbox": [ + 56, + 242, + 116, + 254 + ], + "type": "text", + "content": "Ruting Deng1," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 255, + 107, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 255, + 107, + 265 + ], + "spans": [ + { + "bbox": [ + 56, + 255, + 107, + 265 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 265, + 130, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 265, + 130, + 277 + ], + "spans": [ + { + "bbox": [ + 56, + 265, + 130, + 277 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 265, + 130, + 277 + ], + "type": "text", + "content": " Jinan University" + } + ] + } + ], + 
"index": 13 + }, + { + "bbox": [ + 56, + 297, + 137, + 310 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 297, + 137, + 310 + ], + "spans": [ + { + "bbox": [ + 56, + 297, + 137, + 310 + ], + "type": "text", + "content": "LVGroup_HFUT" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 315, + 295, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 315, + 295, + 338 + ], + "spans": [ + { + "bbox": [ + 56, + 315, + 295, + 338 + ], + "type": "text", + "content": "Title: Swift Parameter-free Attention Network for Efficient Image Super-Resolution" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 340, + 99, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 340, + 99, + 350 + ], + "spans": [ + { + "bbox": [ + 56, + 340, + 99, + 350 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 351, + 194, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 351, + 194, + 363 + ], + "spans": [ + { + "bbox": [ + 56, + 351, + 194, + 363 + ], + "type": "text", + "content": "Yilin Zhang" + }, + { + "bbox": [ + 56, + 351, + 194, + 363 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 351, + 194, + 363 + ], + "type": "text", + "content": " (eslzzyl@163.com)," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 56, + 363, + 241, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 363, + 241, + 375 + ], + "spans": [ + { + "bbox": [ + 56, + 363, + 241, + 375 + ], + "type": "text", + "content": "Huan Zheng" + }, + { + "bbox": [ + 56, + 363, + 241, + 375 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 56, + 363, + 241, + 375 + ], + "type": "text", + "content": ", (huanzheng1998@gmail.com)," + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 56, + 375, + 201, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { 
+ "bbox": [ + 56, + 375, + 201, + 386 + ], + "spans": [ + { + "bbox": [ + 56, + 375, + 201, + 386 + ], + "type": "text", + "content": "Yanyan Wei1 (weiyy@hfut.edu.cn)," + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 56, + 387, + 242, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 387, + 242, + 399 + ], + "spans": [ + { + "bbox": [ + 56, + 387, + 242, + 399 + ], + "type": "text", + "content": "Wenxuan Zhao" + }, + { + "bbox": [ + 56, + 387, + 242, + 399 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 387, + 242, + 399 + ], + "type": "text", + "content": " (nightvoyagerr@gmail.com)," + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 56, + 399, + 227, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 399, + 227, + 411 + ], + "spans": [ + { + "bbox": [ + 56, + 399, + 227, + 411 + ], + "type": "text", + "content": "Suiyi Zhao" + }, + { + "bbox": [ + 56, + 399, + 227, + 411 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 399, + 227, + 411 + ], + "type": "text", + "content": " (meranderzhao@gmail.com)," + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 56, + 411, + 200, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 411, + 200, + 423 + ], + "spans": [ + { + "bbox": [ + 56, + 411, + 200, + 423 + ], + "type": "text", + "content": "Fei Wang1 (jiafei127@gmail.com)," + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 56, + 423, + 194, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 423, + 194, + 435 + ], + "spans": [ + { + "bbox": [ + 56, + 423, + 194, + 435 + ], + "type": "text", + "content": "Kun Li" + }, + { + "bbox": [ + 56, + 423, + 194, + 435 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 423, + 194, + 435 + ], + "type": "text", + "content": " (kunli.hfut@gmail.com)," + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 
56, + 436, + 107, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 436, + 107, + 446 + ], + "spans": [ + { + "bbox": [ + 56, + 436, + 107, + 446 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 56, + 446, + 192, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 446, + 192, + 459 + ], + "spans": [ + { + "bbox": [ + 56, + 446, + 192, + 459 + ], + "type": "text", + "content": "1 Hefei University of Technology" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 56, + 459, + 148, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 459, + 148, + 471 + ], + "spans": [ + { + "bbox": [ + 56, + 459, + 148, + 471 + ], + "type": "text", + "content": "2 University of Macau" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 56, + 491, + 75, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 491, + 75, + 502 + ], + "spans": [ + { + "bbox": [ + 56, + 491, + 75, + 502 + ], + "type": "text", + "content": "YG" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 56, + 508, + 295, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 508, + 295, + 532 + ], + "spans": [ + { + "bbox": [ + 56, + 508, + 295, + 532 + ], + "type": "text", + "content": "Title: Spatial-Gate Self-Distillation Network for Efficient Image Super-Resolution" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 56, + 533, + 99, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 533, + 99, + 543 + ], + "spans": [ + { + "bbox": [ + 56, + 533, + 99, + 543 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 56, + 544, + 212, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 544, + 212, + 556 + ], + "spans": [ + { + "bbox": [ + 56, + 544, + 212, + 556 + ], + "type": "text", + "content": "Yinggan Tang " + }, 
+ { + "bbox": [ + 56, + 544, + 212, + 556 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 544, + 212, + 556 + ], + "type": "text", + "content": " (ygtang@ysu.edu.cn)," + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 56, + 556, + 114, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 556, + 114, + 568 + ], + "spans": [ + { + "bbox": [ + 56, + 556, + 114, + 568 + ], + "type": "text", + "content": "Mengjie Su 2," + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 56, + 569, + 107, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 569, + 107, + 580 + ], + "spans": [ + { + "bbox": [ + 56, + 569, + 107, + 580 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 56, + 580, + 279, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 580, + 279, + 592 + ], + "spans": [ + { + "bbox": [ + 56, + 580, + 279, + 592 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 580, + 279, + 592 + ], + "type": "text", + "content": " School of Electrical Engineering, Yanshan University" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 56, + 612, + 179, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 612, + 179, + 624 + ], + "spans": [ + { + "bbox": [ + 56, + 612, + 179, + 624 + ], + "type": "text", + "content": "MegastudyEdu_Vision.AI" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 56, + 629, + 295, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 629, + 295, + 654 + ], + "spans": [ + { + "bbox": [ + 56, + 629, + 295, + 654 + ], + "type": "text", + "content": "Title: Multi-scale Aggregation Attention Network for Efficient Image Super-resolution" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 56, + 654, + 99, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 654, + 99, + 
664 + ], + "spans": [ + { + "bbox": [ + 56, + 654, + 99, + 664 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 56, + 665, + 235, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 665, + 235, + 677 + ], + "spans": [ + { + "bbox": [ + 56, + 665, + 235, + 677 + ], + "type": "text", + "content": "Jae-hyeon Lee " + }, + { + "bbox": [ + 56, + 665, + 235, + 677 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 665, + 235, + 677 + ], + "type": "text", + "content": " (dlwogus147@gmail.com)," + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 56, + 677, + 137, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 677, + 137, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 677, + 137, + 689 + ], + "type": "text", + "content": "Dong-Hyeop Son1," + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 56, + 689, + 115, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 689, + 115, + 700 + ], + "spans": [ + { + "bbox": [ + 56, + 689, + 115, + 700 + ], + "type": "text", + "content": "Ui-Jin Choi1," + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 56, + 701, + 107, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 701, + 107, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 701, + 107, + 713 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 315, + 72, + 426, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 72, + 426, + 84 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 426, + 84 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 72, + 426, + 84 + ], + "type": "text", + "content": " MegastudyEdu Vision AI" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 315, + 104, + 347, + 115 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 
104, + 347, + 115 + ], + "spans": [ + { + "bbox": [ + 315, + 104, + 347, + 115 + ], + "type": "text", + "content": "MILA" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 314, + 121, + 553, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 121, + 553, + 146 + ], + "spans": [ + { + "bbox": [ + 314, + 121, + 553, + 146 + ], + "type": "text", + "content": "Title: Multi-Level Variance Feature Modulation Network for Lightweight Image Super-Resolution" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 315, + 147, + 358, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 147, + 358, + 156 + ], + "spans": [ + { + "bbox": [ + 315, + 147, + 358, + 156 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 315, + 157, + 529, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 157, + 529, + 170 + ], + "spans": [ + { + "bbox": [ + 315, + 157, + 529, + 170 + ], + "type": "text", + "content": "Tiancheng Shao1 (shaotiancheng666@outlook.com)," + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 315, + 170, + 380, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 170, + 380, + 182 + ], + "spans": [ + { + "bbox": [ + 315, + 170, + 380, + 182 + ], + "type": "text", + "content": "Yuqing Zhang2" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 315, + 182, + 387, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 182, + 387, + 194 + ], + "spans": [ + { + "bbox": [ + 315, + 182, + 387, + 194 + ], + "type": "text", + "content": "Mengcheng " + }, + { + "bbox": [ + 315, + 182, + 387, + 194 + ], + "type": "inline_equation", + "content": "\\mathrm{Ma}^3" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 315, + 194, + 365, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 194, + 365, + 205 + ], + "spans": [ + { + "bbox": [ + 315, + 194, + 365, + 205 + 
], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 315, + 205, + 454, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 205, + 454, + 218 + ], + "spans": [ + { + "bbox": [ + 315, + 205, + 454, + 218 + ], + "type": "text", + "content": "1 Anhui University of Technology" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 315, + 237, + 363, + 249 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 237, + 363, + 249 + ], + "spans": [ + { + "bbox": [ + 315, + 237, + 363, + 249 + ], + "type": "text", + "content": "AiMF_SR" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 314, + 255, + 553, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 255, + 553, + 279 + ], + "spans": [ + { + "bbox": [ + 314, + 255, + 553, + 279 + ], + "type": "text", + "content": "Title: Mixture of Efficient Attention for Efficient Image Super-Resolution" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 315, + 280, + 358, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 280, + 358, + 289 + ], + "spans": [ + { + "bbox": [ + 315, + 280, + 358, + 289 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 315, + 291, + 476, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 291, + 476, + 304 + ], + "spans": [ + { + "bbox": [ + 315, + 291, + 476, + 304 + ], + "type": "text", + "content": "Donggeun " + }, + { + "bbox": [ + 315, + 291, + 476, + 304 + ], + "type": "inline_equation", + "content": "\\mathrm{Ko}^1" + }, + { + "bbox": [ + 315, + 291, + 476, + 304 + ], + "type": "text", + "content": " (sean.ko@aimfuture.ai)," + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 315, + 304, + 394, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 304, + 394, + 315 + ], + "spans": [ + { + "bbox": [ + 315, + 304, + 394, + 315 + 
], + "type": "text", + "content": "Youngsang Kwak1," + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 315, + 316, + 358, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 316, + 358, + 326 + ], + "spans": [ + { + "bbox": [ + 315, + 316, + 358, + 326 + ], + "type": "text", + "content": "Jiun Lee1," + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 315, + 327, + 380, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 327, + 380, + 338 + ], + "spans": [ + { + "bbox": [ + 315, + 327, + 380, + 338 + ], + "type": "text", + "content": "Jaehwa Kwak1," + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 315, + 339, + 365, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 339, + 365, + 351 + ], + "spans": [ + { + "bbox": [ + 315, + 339, + 365, + 351 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 315, + 351, + 389, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 351, + 389, + 361 + ], + "spans": [ + { + "bbox": [ + 315, + 351, + 389, + 361 + ], + "type": "text", + "content": "1 AiM Future Inc." 
+ } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 315, + 383, + 359, + 394 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 383, + 359, + 394 + ], + "spans": [ + { + "bbox": [ + 315, + 383, + 359, + 394 + ], + "type": "text", + "content": "BVIVSR" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 314, + 400, + 531, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 400, + 531, + 413 + ], + "spans": [ + { + "bbox": [ + 314, + 400, + 531, + 413 + ], + "type": "text", + "content": "Title: NTIRE 2025 Efficient SR Challenge Factsheet" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 315, + 413, + 358, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 413, + 358, + 423 + ], + "spans": [ + { + "bbox": [ + 315, + 413, + 358, + 423 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 315, + 424, + 497, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 424, + 497, + 437 + ], + "spans": [ + { + "bbox": [ + 315, + 424, + 497, + 437 + ], + "type": "text", + "content": "Yuxuan Jiang" + }, + { + "bbox": [ + 315, + 424, + 497, + 437 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 424, + 497, + 437 + ], + "type": "text", + "content": " (yuxuan.jiang@bristol.ac.uk)," + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 315, + 437, + 492, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 437, + 492, + 449 + ], + "spans": [ + { + "bbox": [ + 315, + 437, + 492, + 449 + ], + "type": "text", + "content": "Qiang Zhu" + }, + { + "bbox": [ + 315, + 437, + 492, + 449 + ], + "type": "inline_equation", + "content": "^{2,1}" + }, + { + "bbox": [ + 315, + 437, + 492, + 449 + ], + "type": "text", + "content": " (zhuqiang@std.uestc.edu.cn)," + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 315, + 449, + 477, + 460 + ], + "type": "text", + "angle": 
0, + "lines": [ + { + "bbox": [ + 315, + 449, + 477, + 460 + ], + "spans": [ + { + "bbox": [ + 315, + 449, + 477, + 460 + ], + "type": "text", + "content": "Siyue Teng1 (siyue.teng@bristol.ac.uk)," + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 315, + 460, + 476, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 460, + 476, + 472 + ], + "spans": [ + { + "bbox": [ + 315, + 460, + 476, + 472 + ], + "type": "text", + "content": "Fan Zhang1, (fan.zhang@bristol.ac.uk)," + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 315, + 472, + 470, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 472, + 470, + 484 + ], + "spans": [ + { + "bbox": [ + 315, + 472, + 470, + 484 + ], + "type": "text", + "content": "Shuyuan Zhu2, (eezsy@uestc.edu.cn)," + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 315, + 484, + 465, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 484, + 465, + 496 + ], + "spans": [ + { + "bbox": [ + 315, + 484, + 465, + 496 + ], + "type": "text", + "content": "Bing Zeng" + }, + { + "bbox": [ + 315, + 484, + 465, + 496 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 484, + 465, + 496 + ], + "type": "text", + "content": ", (eezeng@uestc.edu.cn)," + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 315, + 496, + 471, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 496, + 471, + 508 + ], + "spans": [ + { + "bbox": [ + 315, + 496, + 471, + 508 + ], + "type": "text", + "content": "David Bull" + }, + { + "bbox": [ + 315, + 496, + 471, + 508 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 496, + 471, + 508 + ], + "type": "text", + "content": " (dave.bull@bristol.ac.uk)," + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 315, + 509, + 365, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 509, + 365, + 520 + ], + "spans": [ + 
{ + "bbox": [ + 315, + 509, + 365, + 520 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 315, + 521, + 553, + 544 + ], + "type": "list", + "angle": 0, + "index": 72, + "blocks": [ + { + "bbox": [ + 315, + 521, + 406, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 521, + 406, + 532 + ], + "spans": [ + { + "bbox": [ + 315, + 521, + 406, + 532 + ], + "type": "text", + "content": "1 University of Bristol" + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 315, + 532, + 553, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 532, + 553, + 544 + ], + "spans": [ + { + "bbox": [ + 315, + 532, + 553, + 544 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 532, + 553, + 544 + ], + "type": "text", + "content": " University of Electronic Science and Technology of China" + } + ] + } + ], + "index": 71 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 563, + 372, + 575 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 563, + 372, + 575 + ], + "spans": [ + { + "bbox": [ + 315, + 563, + 372, + 575 + ], + "type": "text", + "content": "CUIT_HTT" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 314, + 582, + 553, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 582, + 553, + 605 + ], + "spans": [ + { + "bbox": [ + 314, + 582, + 553, + 605 + ], + "type": "text", + "content": "Title: Frequency-Segmented Attention Network for Lightweight Image Super" + } + ] + } + ], + "index": 74 + }, + { + "bbox": [ + 315, + 606, + 358, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 606, + 358, + 616 + ], + "spans": [ + { + "bbox": [ + 315, + 606, + 358, + 616 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 75 + }, + { + "bbox": [ + 315, + 616, + 437, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 315, + 616, + 437, + 630 + ], + "spans": [ + { + "bbox": [ + 315, + 616, + 437, + 630 + ], + "type": "text", + "content": "Jing Hu1 (jing_hu@163.com)," + } + ] + } + ], + "index": 76 + }, + { + "bbox": [ + 315, + 630, + 362, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 630, + 362, + 641 + ], + "spans": [ + { + "bbox": [ + 315, + 630, + 362, + 641 + ], + "type": "text", + "content": "Hui Deng1," + } + ] + } + ], + "index": 77 + }, + { + "bbox": [ + 315, + 642, + 373, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 642, + 373, + 653 + ], + "spans": [ + { + "bbox": [ + 315, + 642, + 373, + 653 + ], + "type": "text", + "content": "Xuan Zhang" + }, + { + "bbox": [ + 315, + 642, + 373, + 653 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 642, + 373, + 653 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 78 + }, + { + "bbox": [ + 315, + 654, + 353, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 654, + 353, + 664 + ], + "spans": [ + { + "bbox": [ + 315, + 654, + 353, + 664 + ], + "type": "text", + "content": "Lin Zhu" + } + ] + } + ], + "index": 79 + }, + { + "bbox": [ + 315, + 665, + 364, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 665, + 364, + 677 + ], + "spans": [ + { + "bbox": [ + 315, + 665, + 364, + 677 + ], + "type": "text", + "content": "Qinrui Fan" + } + ] + } + ], + "index": 80 + }, + { + "bbox": [ + 315, + 678, + 365, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 678, + 365, + 689 + ], + "spans": [ + { + "bbox": [ + 315, + 678, + 365, + 689 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 81 + }, + { + "bbox": [ + 315, + 689, + 515, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 689, + 515, + 701 + ], + "spans": [ + { + "bbox": [ + 315, + 689, + 515, + 701 + 
], + "type": "text", + "content": "1 Chengdu University of Information Technology" + } + ] + } + ], + "index": 82 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "42" + } + ] + } + ], + "index": 83 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 41 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 107, + 83 + ], + "type": "text", + "content": "GXZY.AI" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 89, + 295, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 89, + 295, + 114 + ], + "spans": [ + { + "bbox": [ + 55, + 89, + 295, + 114 + ], + "type": "text", + "content": "Title: Parameter Free Vision Mamba For Lightweight Image Super-Resolution" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 114, + 100, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 114, + 100, + 124 + ], + "spans": [ + { + "bbox": [ + 56, + 114, + 100, + 124 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 125, + 214, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 125, + 214, + 137 + ], + "spans": [ + { + "bbox": [ + 56, + 125, + 214, + 137 + ], + "type": "text", + "content": "Weijian Deng" + }, + { + "bbox": [ + 56, + 125, + 214, + 137 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 125, + 214, + 137 + ], + "type": "text", + "content": " (348957269@qq.com)," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 137, + 204, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 
137, + 204, + 149 + ], + "spans": [ + { + "bbox": [ + 56, + 137, + 204, + 149 + ], + "type": "text", + "content": "Junnan " + }, + { + "bbox": [ + 56, + 137, + 204, + 149 + ], + "type": "inline_equation", + "content": "\\mathbf{W u}^{1}" + }, + { + "bbox": [ + 56, + 137, + 204, + 149 + ], + "type": "text", + "content": " (838050895@qq.com)," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 149, + 218, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 149, + 218, + 161 + ], + "spans": [ + { + "bbox": [ + 56, + 149, + 218, + 161 + ], + "type": "text", + "content": "Wenqin Deng" + }, + { + "bbox": [ + 56, + 149, + 218, + 161 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 56, + 149, + 218, + 161 + ], + "type": "text", + "content": " (1601524278@qq.com)," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 161, + 205, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 161, + 205, + 173 + ], + "spans": [ + { + "bbox": [ + 56, + 161, + 205, + 173 + ], + "type": "text", + "content": "Yuquan Liu" + }, + { + "bbox": [ + 56, + 161, + 205, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 161, + 205, + 173 + ], + "type": "text", + "content": " (653060432@qq.com)," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 173, + 214, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 173, + 214, + 185 + ], + "spans": [ + { + "bbox": [ + 56, + 173, + 214, + 185 + ], + "type": "text", + "content": "Zhaohong " + }, + { + "bbox": [ + 56, + 173, + 214, + 185 + ], + "type": "inline_equation", + "content": "\\mathrm{Xu}^{1}" + }, + { + "bbox": [ + 56, + 173, + 214, + 185 + ], + "type": "text", + "content": " (719357155@qq.com)," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 185, + 107, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 185, + 107, + 196 + ], + "spans": [ 
+ { + "bbox": [ + 56, + 185, + 107, + 196 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 196, + 295, + 233 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 56, + 196, + 295, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 196, + 295, + 219 + ], + "spans": [ + { + "bbox": [ + 56, + 196, + 295, + 219 + ], + "type": "text", + "content": "1 Guangxi China Tobacco Industry Corporation Limited, China" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 220, + 172, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 220, + 172, + 233 + ], + "spans": [ + { + "bbox": [ + 56, + 220, + 172, + 233 + ], + "type": "text", + "content": "2 Guangxi University, China" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 56, + 251, + 86, + 263 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 251, + 86, + 263 + ], + "spans": [ + { + "bbox": [ + 56, + 251, + 86, + 263 + ], + "type": "text", + "content": "IPCV" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 269, + 151, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 269, + 151, + 281 + ], + "spans": [ + { + "bbox": [ + 56, + 269, + 151, + 281 + ], + "type": "text", + "content": "Title: Efficient HiTSR" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 281, + 100, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 281, + 100, + 292 + ], + "spans": [ + { + "bbox": [ + 56, + 281, + 100, + 292 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 293, + 246, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 293, + 246, + 305 + ], + "spans": [ + { + "bbox": [ + 56, + 293, + 246, + 305 + ], + "type": "text", + "content": "Jameer Babu Pinjari " + }, + { + 
"bbox": [ + 56, + 293, + 246, + 305 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 293, + 246, + 305 + ], + "type": "text", + "content": " (jameer.jb@gmail.com)," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 305, + 259, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 305, + 259, + 317 + ], + "spans": [ + { + "bbox": [ + 56, + 305, + 259, + 317 + ], + "type": "text", + "content": "Kuldeep Purohit " + }, + { + "bbox": [ + 56, + 305, + 259, + 317 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 305, + 259, + 317 + ], + "type": "text", + "content": ", (kuldeeppurohit3@gmail.com)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 317, + 107, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 317, + 107, + 329 + ], + "spans": [ + { + "bbox": [ + 56, + 317, + 107, + 329 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 56, + 329, + 159, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 329, + 159, + 341 + ], + "spans": [ + { + "bbox": [ + 56, + 329, + 159, + 341 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 329, + 159, + 341 + ], + "type": "text", + "content": " Independent researcher" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 56, + 360, + 78, + 372 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 360, + 78, + 372 + ], + "spans": [ + { + "bbox": [ + 56, + 360, + 78, + 372 + ], + "type": "text", + "content": "X-L" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 56, + 377, + 295, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 377, + 295, + 401 + ], + "spans": [ + { + "bbox": [ + 56, + 377, + 295, + 401 + ], + "type": "text", + "content": "Title: Partial Permuted Self-Attention for Lightweight Super-Resolution" + } + ] + } + 
], + "index": 20 + }, + { + "bbox": [ + 56, + 402, + 100, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 402, + 100, + 412 + ], + "spans": [ + { + "bbox": [ + 56, + 402, + 100, + 412 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 56, + 414, + 216, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 414, + 216, + 426 + ], + "spans": [ + { + "bbox": [ + 56, + 414, + 216, + 426 + ], + "type": "text", + "content": "Zeyu Xiao" + }, + { + "bbox": [ + 56, + 414, + 216, + 426 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 414, + 216, + 426 + ], + "type": "text", + "content": " (zeyuxiao1997@163.com)," + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 56, + 426, + 242, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 426, + 242, + 437 + ], + "spans": [ + { + "bbox": [ + 56, + 426, + 242, + 437 + ], + "type": "text", + "content": "Zhuoyuan Li" + }, + { + "bbox": [ + 56, + 426, + 242, + 437 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 56, + 426, + 242, + 437 + ], + "type": "text", + "content": " (zhuoyuanli@mail.ustc.edu.cn)" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 56, + 437, + 107, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 437, + 107, + 449 + ], + "spans": [ + { + "bbox": [ + 56, + 437, + 107, + 449 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 56, + 449, + 255, + 473 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 56, + 449, + 198, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 449, + 198, + 461 + ], + "spans": [ + { + "bbox": [ + 56, + 449, + 198, + 461 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 449, + 198, + 461 + ], + "type": "text", + 
"content": " National University of Singapore" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 56, + 461, + 255, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 461, + 255, + 473 + ], + "spans": [ + { + "bbox": [ + 56, + 461, + 255, + 473 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 56, + 461, + 255, + 473 + ], + "type": "text", + "content": " University of Science and Technology of China" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 56, + 493, + 125, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 493, + 125, + 506 + ], + "spans": [ + { + "bbox": [ + 56, + 493, + 125, + 506 + ], + "type": "text", + "content": "Quantum_Res" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 56, + 510, + 295, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 510, + 295, + 533 + ], + "spans": [ + { + "bbox": [ + 56, + 510, + 295, + 533 + ], + "type": "text", + "content": "Title: Efficient Mamba-Based Image Super-Resolution via Knowledge Distillation" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 56, + 535, + 100, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 535, + 100, + 544 + ], + "spans": [ + { + "bbox": [ + 56, + 535, + 100, + 544 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 56, + 545, + 245, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 545, + 245, + 558 + ], + "spans": [ + { + "bbox": [ + 56, + 545, + 245, + 558 + ], + "type": "text", + "content": "Surya Vashist" + }, + { + "bbox": [ + 56, + 545, + 245, + 558 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 545, + 245, + 558 + ], + "type": "text", + "content": " (surya.vashisth@s.amity.edu)," + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 56, + 558, + 268, + 569 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 56, + 558, + 268, + 569 + ], + "spans": [ + { + "bbox": [ + 56, + 558, + 268, + 569 + ], + "type": "text", + "content": "Akshay Dudhane" + }, + { + "bbox": [ + 56, + 558, + 268, + 569 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 56, + 558, + 268, + 569 + ], + "type": "text", + "content": " (akshay.dudhane@mbzuai.ac.ae)," + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 56, + 569, + 231, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 569, + 231, + 582 + ], + "spans": [ + { + "bbox": [ + 56, + 569, + 231, + 582 + ], + "type": "text", + "content": "Praful Hambarde3 (praful@iitmandi.ac.in)," + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 56, + 582, + 283, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 582, + 283, + 594 + ], + "spans": [ + { + "bbox": [ + 56, + 582, + 283, + 594 + ], + "type": "text", + "content": "Sachin Chaudhary" + }, + { + "bbox": [ + 56, + 582, + 283, + 594 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 56, + 582, + 283, + 594 + ], + "type": "text", + "content": " (sachin.chaudhary@ddn.upes.ac.in)," + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 56, + 594, + 247, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 594, + 247, + 605 + ], + "spans": [ + { + "bbox": [ + 56, + 594, + 247, + 605 + ], + "type": "text", + "content": "Satya Naryan Tazi" + }, + { + "bbox": [ + 56, + 594, + 247, + 605 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 56, + 594, + 247, + 605 + ], + "type": "text", + "content": " (satya.tazi@ecajmer.ac.in)," + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 56, + 605, + 204, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 605, + 204, + 617 + ], + "spans": [ + { + "bbox": [ + 56, + 605, + 204, + 617 + ], + "type": "text", + "content": "Prashant Patil" 
+ }, + { + "bbox": [ + 56, + 605, + 204, + 617 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 56, + 605, + 204, + 617 + ], + "type": "text", + "content": " (pwpatil@iitg.ac.in)," + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 56, + 617, + 271, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 617, + 271, + 630 + ], + "spans": [ + { + "bbox": [ + 56, + 617, + 271, + 630 + ], + "type": "text", + "content": "Santosh Kumar Vipparthi7 (skvipparthi@iitrpr.ac.in)," + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 56, + 630, + 231, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 630, + 231, + 642 + ], + "spans": [ + { + "bbox": [ + 56, + 630, + 231, + 642 + ], + "type": "text", + "content": "Subrahmanyam Murala8 (muralas@tcd.ie)," + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 56, + 642, + 107, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 642, + 107, + 653 + ], + "spans": [ + { + "bbox": [ + 56, + 642, + 107, + 653 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 56, + 653, + 294, + 712 + ], + "type": "list", + "angle": 0, + "index": 44, + "blocks": [ + { + "bbox": [ + 56, + 653, + 192, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 653, + 192, + 665 + ], + "spans": [ + { + "bbox": [ + 56, + 653, + 192, + 665 + ], + "type": "text", + "content": "1 Amity University Punjab, India" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 56, + 665, + 294, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 665, + 294, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 665, + 294, + 689 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 56, + 665, + 294, + 689 + ], + "type": "text", + "content": " Mohamed Bin Zayed University of Artificial Intelligence, Abu Dhabi" + } + ] + } + ], + "index": 41 + 
}, + { + "bbox": [ + 56, + 689, + 241, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 689, + 241, + 701 + ], + "spans": [ + { + "bbox": [ + 56, + 689, + 241, + 701 + ], + "type": "text", + "content": "3 Indian Institute of Technology Mandi, India" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 56, + 701, + 157, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 701, + 157, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 701, + 157, + 712 + ], + "type": "text", + "content": "4 UPES Dehradun, India" + } + ] + } + ], + "index": 43 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 72, + 512, + 120 + ], + "type": "list", + "angle": 0, + "index": 49, + "blocks": [ + { + "bbox": [ + 314, + 72, + 512, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 512, + 84 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 512, + 84 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 314, + 72, + 512, + 84 + ], + "type": "text", + "content": " Government Engineering College Ajmer, India" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 315, + 84, + 512, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 84, + 512, + 95 + ], + "spans": [ + { + "bbox": [ + 315, + 84, + 512, + 95 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 315, + 84, + 512, + 95 + ], + "type": "text", + "content": " Indian Institute of Technology Guwahati, India" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 315, + 96, + 497, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 96, + 497, + 108 + ], + "spans": [ + { + "bbox": [ + 315, + 96, + 497, + 108 + ], + "type": "inline_equation", + "content": "^{7}" + }, + { + "bbox": [ + 315, + 96, + 497, + 108 + ], + "type": "text", + "content": " Indian Institute of Technology Ropar, India" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ 
+ 315, + 108, + 449, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 108, + 449, + 120 + ], + "spans": [ + { + "bbox": [ + 315, + 108, + 449, + 120 + ], + "type": "inline_equation", + "content": "^{8}" + }, + { + "bbox": [ + 315, + 108, + 449, + 120 + ], + "type": "text", + "content": " Trinity College Dublin, Ireland" + } + ] + } + ], + "index": 48 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 140, + 358, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 140, + 358, + 152 + ], + "spans": [ + { + "bbox": [ + 314, + 140, + 358, + 152 + ], + "type": "text", + "content": "SylabSR" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 313, + 157, + 541, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 157, + 541, + 169 + ], + "spans": [ + { + "bbox": [ + 313, + 157, + 541, + 169 + ], + "type": "text", + "content": "Title: AutoRegressive Residual Local Feature Network" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 315, + 170, + 358, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 170, + 358, + 180 + ], + "spans": [ + { + "bbox": [ + 315, + 170, + 358, + 180 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 315, + 181, + 490, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 181, + 490, + 193 + ], + "spans": [ + { + "bbox": [ + 315, + 181, + 490, + 193 + ], + "type": "text", + "content": "Wei-Chen Shen" + }, + { + "bbox": [ + 315, + 181, + 490, + 193 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 181, + 490, + 193 + ], + "type": "text", + "content": " (r11921a38@ntu.edu.tw)," + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 315, + 193, + 392, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 193, + 392, + 205 + ], + "spans": [ + { + "bbox": [ + 315, + 193, + 392, + 205 
+ ], + "type": "text", + "content": "I-Hsiang Chen" + }, + { + "bbox": [ + 315, + 193, + 392, + 205 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 315, + 193, + 392, + 205 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 315, + 205, + 365, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 205, + 365, + 217 + ], + "spans": [ + { + "bbox": [ + 315, + 205, + 365, + 217 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 315, + 217, + 434, + 241 + ], + "type": "list", + "angle": 0, + "index": 58, + "blocks": [ + { + "bbox": [ + 315, + 217, + 434, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 217, + 434, + 229 + ], + "spans": [ + { + "bbox": [ + 315, + 217, + 434, + 229 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 217, + 434, + 229 + ], + "type": "text", + "content": " National Taiwan University" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 315, + 229, + 427, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 229, + 427, + 241 + ], + "spans": [ + { + "bbox": [ + 315, + 229, + 427, + 241 + ], + "type": "text", + "content": "2 University of Washington" + } + ] + } + ], + "index": 57 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 261, + 361, + 273 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 261, + 361, + 273 + ], + "spans": [ + { + "bbox": [ + 314, + 261, + 361, + 273 + ], + "type": "text", + "content": "NJUPCA" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 313, + 278, + 555, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 278, + 555, + 302 + ], + "spans": [ + { + "bbox": [ + 313, + 278, + 555, + 302 + ], + "type": "text", + "content": "Title: Spatial-Frequency Fusion Model for Efficient Super-Resolution" + } + ] + } + 
], + "index": 60 + }, + { + "bbox": [ + 315, + 303, + 358, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 303, + 358, + 314 + ], + "spans": [ + { + "bbox": [ + 315, + 303, + 358, + 314 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 315, + 314, + 497, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 314, + 497, + 327 + ], + "spans": [ + { + "bbox": [ + 315, + 314, + 497, + 327 + ], + "type": "text", + "content": "Yunzhe " + }, + { + "bbox": [ + 315, + 314, + 497, + 327 + ], + "type": "inline_equation", + "content": "\\mathbf{X}\\mathbf{u}^{1}" + }, + { + "bbox": [ + 315, + 314, + 497, + 327 + ], + "type": "text", + "content": " (221900144@smail.nju.edu.cn)," + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 315, + 327, + 369, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 327, + 369, + 338 + ], + "spans": [ + { + "bbox": [ + 315, + 327, + 369, + 338 + ], + "type": "text", + "content": "Chen Zhao1," + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 315, + 338, + 381, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 338, + 381, + 350 + ], + "spans": [ + { + "bbox": [ + 315, + 338, + 381, + 350 + ], + "type": "text", + "content": "Zhizhou Chen1," + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 315, + 350, + 365, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 350, + 365, + 362 + ], + "spans": [ + { + "bbox": [ + 315, + 350, + 365, + 362 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 315, + 362, + 400, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 362, + 400, + 374 + ], + "spans": [ + { + "bbox": [ + 315, + 362, + 400, + 374 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 362, + 400, + 374 + ], + "type": "text", + 
"content": " Nanjing University" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 314, + 394, + 365, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 394, + 365, + 407 + ], + "spans": [ + { + "bbox": [ + 314, + 394, + 365, + 407 + ], + "type": "text", + "content": "DepthIBN" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 313, + 411, + 554, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 411, + 554, + 436 + ], + "spans": [ + { + "bbox": [ + 313, + 411, + 554, + 436 + ], + "type": "text", + "content": "Title: Involution and BSConv Multi-Depth Distillation Network for Lightweight Image Super-Resolution" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 315, + 436, + 358, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 436, + 358, + 446 + ], + "spans": [ + { + "bbox": [ + 315, + 436, + 358, + 446 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 315, + 447, + 539, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 447, + 539, + 471 + ], + "spans": [ + { + "bbox": [ + 315, + 447, + 539, + 471 + ], + "type": "text", + "content": "Akram Khatami-Rizi " + }, + { + "bbox": [ + 315, + 447, + 539, + 471 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 447, + 539, + 471 + ], + "type": "text", + "content": " (akramkhatami67@gmail.com), Ahmad Mahmoudi-Aznaveh " + }, + { + "bbox": [ + 315, + 447, + 539, + 471 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 447, + 539, + 471 + ], + "type": "text", + "content": ", (a.mahmoudi@sbu.ac.ir" + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 315, + 472, + 365, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 472, + 365, + 483 + ], + "spans": [ + { + "bbox": [ + 315, + 472, + 365, + 483 + ], + "type": "text", + "content": "Affiliations:" + } 
+ ] + } + ], + "index": 71 + }, + { + "bbox": [ + 315, + 483, + 553, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 483, + 553, + 507 + ], + "spans": [ + { + "bbox": [ + 315, + 483, + 553, + 507 + ], + "type": "text", + "content": "1 Cyberspace Research Institute of Shahid Beheshti University of Iran" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 315, + 527, + 365, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 527, + 365, + 538 + ], + "spans": [ + { + "bbox": [ + 315, + 527, + 365, + 538 + ], + "type": "text", + "content": "Cidaut.AI" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 314, + 544, + 470, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 544, + 470, + 556 + ], + "spans": [ + { + "bbox": [ + 314, + 544, + 470, + 556 + ], + "type": "text", + "content": "Title: Fused Edge Attention Network" + } + ] + } + ], + "index": 74 + }, + { + "bbox": [ + 315, + 557, + 358, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 557, + 358, + 567 + ], + "spans": [ + { + "bbox": [ + 315, + 557, + 358, + 567 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 75 + }, + { + "bbox": [ + 315, + 568, + 476, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 568, + 476, + 580 + ], + "spans": [ + { + "bbox": [ + 315, + 568, + 476, + 580 + ], + "type": "text", + "content": "Alejandro Merino1 (alemer@cidaut.es)," + } + ] + } + ], + "index": 76 + }, + { + "bbox": [ + 315, + 580, + 471, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 580, + 471, + 592 + ], + "spans": [ + { + "bbox": [ + 315, + 580, + 471, + 592 + ], + "type": "text", + "content": "Bruno Longarela1 (brulon@cidaut.es)," + } + ] + } + ], + "index": 77 + }, + { + "bbox": [ + 315, + 592, + 451, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 592, + 451, + 604 + ], 
+ "spans": [ + { + "bbox": [ + 315, + 592, + 451, + 604 + ], + "type": "text", + "content": "Javier Abad1 (javaba@cidadut.es)," + } + ] + } + ], + "index": 78 + }, + { + "bbox": [ + 315, + 604, + 537, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 604, + 537, + 616 + ], + "spans": [ + { + "bbox": [ + 315, + 604, + 537, + 616 + ], + "type": "text", + "content": "Marcos V. Conde" + }, + { + "bbox": [ + 315, + 604, + 537, + 616 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 604, + 537, + 616 + ], + "type": "text", + "content": " (marcos.conde@uni-wuerzburg.de)," + } + ] + } + ], + "index": 79 + }, + { + "bbox": [ + 315, + 616, + 365, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 616, + 365, + 628 + ], + "spans": [ + { + "bbox": [ + 315, + 616, + 365, + 628 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 80 + }, + { + "bbox": [ + 315, + 628, + 461, + 652 + ], + "type": "list", + "angle": 0, + "index": 83, + "blocks": [ + { + "bbox": [ + 315, + 628, + 392, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 628, + 392, + 640 + ], + "spans": [ + { + "bbox": [ + 315, + 628, + 392, + 640 + ], + "type": "text", + "content": "1 Cidaut AI, Spain" + } + ] + } + ], + "index": 81 + }, + { + "bbox": [ + 315, + 640, + 461, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 640, + 461, + 652 + ], + "spans": [ + { + "bbox": [ + 315, + 640, + 461, + 652 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 640, + 461, + 652 + ], + "type": "text", + "content": " University of Würzburg, Germany" + } + ] + } + ], + "index": 82 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 671, + 337, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 671, + 337, + 683 + ], + "spans": [ + { + "bbox": [ + 315, + 671, + 337, + 683 + ], + 
"type": "text", + "content": "IVL" + } + ] + } + ], + "index": 84 + }, + { + "bbox": [ + 314, + 689, + 378, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 689, + 378, + 700 + ], + "spans": [ + { + "bbox": [ + 314, + 689, + 378, + 700 + ], + "type": "text", + "content": "Title: PAEDN" + } + ] + } + ], + "index": 85 + }, + { + "bbox": [ + 315, + 701, + 358, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 701, + 358, + 712 + ], + "spans": [ + { + "bbox": [ + 315, + 701, + 358, + 712 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 86 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "43" + } + ] + } + ], + "index": 87 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 42 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 251, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 251, + 83 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 251, + 83 + ], + "type": "text", + "content": "Simone Bianco" + }, + { + "bbox": [ + 55, + 72, + 251, + 83 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 72, + 251, + 83 + ], + "type": "text", + "content": " (simone.bianco@unimib.com)," + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 84, + 214, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 84, + 214, + 95 + ], + "spans": [ + { + "bbox": [ + 55, + 84, + 214, + 95 + ], + "type": "text", + "content": "Luca Cogo1 (luca.cogo@unimib.com)," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 96, + 277, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 96, + 277, + 108 + ], + "spans": [ + { + "bbox": [ + 55, + 96, + 277, + 
108 + ], + "type": "text", + "content": "Gianmarco Corti1 (g.corti1967@campus.unimib.com)," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 121, + 107, + 131 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 121, + 107, + 131 + ], + "spans": [ + { + "bbox": [ + 55, + 121, + 107, + 131 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 132, + 295, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 132, + 295, + 168 + ], + "spans": [ + { + "bbox": [ + 55, + 132, + 295, + 168 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 132, + 295, + 168 + ], + "type": "text", + "content": " Department of Informatics Systems and Communication, University of Milano-Bicocca, Viale Sarca 336, Building U14, Milan, Italy" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 190, + 115, + 203 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 190, + 115, + 203 + ], + "spans": [ + { + "bbox": [ + 56, + 190, + 115, + 203 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 210, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 66, + 210, + 295, + 276 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 210, + 295, + 276 + ], + "spans": [ + { + "bbox": [ + 66, + 210, + 295, + 276 + ], + "type": "text", + "content": "[1] Lusine Abrahamyan, Anh Minh Truong, Wilfried Philips, and Nikos Deligiannis. Gradient variance loss for structure-enhanced image super-resolution. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 3219-3223. IEEE, 2022. 
3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 65, + 277, + 295, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 277, + 295, + 331 + ], + "spans": [ + { + "bbox": [ + 65, + 277, + 295, + 331 + ], + "type": "text", + "content": "[2] Eirikur Agustsson and Radu Timofte. Ntire 2017 challenge on single image super-resolution: Dataset and study. In 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1122-1131, 2017. 14" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 65, + 333, + 295, + 377 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 333, + 295, + 377 + ], + "spans": [ + { + "bbox": [ + 65, + 333, + 295, + 377 + ], + "type": "text", + "content": "[3] Eirikur Agustsson and Radu Timofte. Ntire 2017 challenge on single image super-resolution: Dataset and study. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2017. 33" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 65, + 378, + 295, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 378, + 295, + 433 + ], + "spans": [ + { + "bbox": [ + 65, + 378, + 295, + 433 + ], + "type": "text", + "content": "[4] Eirikur Agustsson and Radu Timofte. NTIRE 2017 challenge on single image super-resolution: Dataset and study. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 126-135, 2017. 2, 18, 19, 22, 23, 26, 33" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 65, + 434, + 295, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 434, + 295, + 478 + ], + "spans": [ + { + "bbox": [ + 65, + 434, + 295, + 478 + ], + "type": "text", + "content": "[5] Akram Khatami-Rizi Ahmad Mahmoudi-Aznaveh. The role of involution in lightweight super resolution. 2024 13th Iranian/3rd International Machine Vision and Image Processing Conference (MVIP), 2024. 
37" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 65, + 479, + 295, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 479, + 295, + 522 + ], + "spans": [ + { + "bbox": [ + 65, + 479, + 295, + 522 + ], + "type": "text", + "content": "[6] Akram Khatami-Rizi Ahmad Mahmoudi-Aznaveh. Involution and bsconv multi-depth distillation network for lightweight image super-resolution. arXiv preprint arXiv:2503.14779, 2025. 37" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 65, + 524, + 295, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 524, + 295, + 567 + ], + "spans": [ + { + "bbox": [ + 65, + 524, + 295, + 567 + ], + "type": "text", + "content": "[7] Sidra Aleem, Julia Dietlmeier, Eric Arazo, and Suzanne Little. Convlora and adabn based domain adaptation via self-training. In 2024 IEEE International Symposium on Biomedical Imaging (ISBI), pages 1-5. IEEE, 2024. 6, 7" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 65, + 569, + 295, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 569, + 295, + 644 + ], + "spans": [ + { + "bbox": [ + 65, + 569, + 295, + 644 + ], + "type": "text", + "content": "[8] Jiezhang Cao, Qin Wang, Yongqin Xian, Yawei Li, Bingbing Ni, Zhiming Pi, Kai Zhang, Yulun Zhang, Radu Timofte, and Luc Van Gool. Ciaosr: Continuous implicit attention-in-attention network for arbitrary-scale image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1796–1807, 2023. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 65, + 647, + 295, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 647, + 295, + 690 + ], + "spans": [ + { + "bbox": [ + 65, + 647, + 295, + 690 + ], + "type": "text", + "content": "[9] Jierun Chen, Shiu-hong Kao, Hao He, Weipeng Zhuo, Song Wen, Chul-Ho Lee, and S-H Gary Chan. 
Run, don't walk: Chasing higher flops for faster neural networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2023. 33" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 61, + 691, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 691, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 61, + 691, + 294, + 713 + ], + "type": "text", + "content": "[10] Liangyu Chen, Xiaojie Chu, Xiangyu Zhang, and Jian Sun. Simple baselines for image restoration, 2022. 38" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 320, + 73, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 320, + 73, + 554, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 73, + 554, + 139 + ], + "spans": [ + { + "bbox": [ + 320, + 73, + 554, + 139 + ], + "type": "text", + "content": "[11] Zheng Chen, Zongwei Wu, Eduard Zamfir, Kai Zhang, Yu-lun Zhang, Radu Timofte, Xiaokang Yang, Hongyuan Yu, Cheng Wan, Yuxin Hong, et al. Ntire 2024 challenge on image super-resolution (x4): Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6108-6132, 2024. 30" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 320, + 140, + 554, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 140, + 554, + 205 + ], + "spans": [ + { + "bbox": [ + 320, + 140, + 554, + 205 + ], + "type": "text", + "content": "[12] Zheng Chen, Kai Liu, Jue Gong, Jingkai Wang, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on image super-resolution " + }, + { + "bbox": [ + 320, + 140, + 554, + 205 + ], + "type": "inline_equation", + "content": "(\\times 4)" + }, + { + "bbox": [ + 320, + 140, + 554, + 205 + ], + "type": "text", + "content": ": Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 320, + 207, + 554, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 207, + 554, + 273 + ], + "spans": [ + { + "bbox": [ + 320, + 207, + 554, + 273 + ], + "type": "text", + "content": "[13] Zheng Chen, Jingkai Wang, Kai Liu, Jue Gong, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on real-world face restoration: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 320, + 274, + 554, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 274, + 554, + 308 + ], + "spans": [ + { + "bbox": [ + 320, + 274, + 554, + 308 + ], + "type": "text", + "content": "[14] Sung-Jin Cho, Seo-Won Ji, Jun-Pyo Hong, Seung-Won Jung, and Sung-Jea Ko. Rethinking coarse-to-fine approach in single image deblurring. In ICCV, 2021. 10, 17, 29" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 320, + 308, + 554, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 308, + 554, + 352 + ], + "spans": [ + { + "bbox": [ + 320, + 308, + 554, + 352 + ], + "type": "text", + "content": "[15] Sung-Jin Cho, Seo-Won Ji, Jun-Pyo Hong, Seung-Won Jung, and Sung-Jea Ko. Rethinking coarse-to-fine approach in single image deblurring. In ICCV, pages 4641-4650, 2021. 18, 25, 26" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 320, + 354, + 554, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 354, + 554, + 398 + ], + "spans": [ + { + "bbox": [ + 320, + 354, + 554, + 398 + ], + "type": "text", + "content": "[16] Marcos Conde, Radu Timofte, et al. NTIRE 2025 challenge on raw image restoration and super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 320, + 399, + 554, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 399, + 554, + 453 + ], + "spans": [ + { + "bbox": [ + 320, + 399, + 554, + 453 + ], + "type": "text", + "content": "[17] Marcos Conde, Radu Timofte, et al. Raw image reconstruction from RGB on smartphones. NTIRE 2025 challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 320, + 456, + 554, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 456, + 554, + 509 + ], + "spans": [ + { + "bbox": [ + 320, + 456, + 554, + 509 + ], + "type": "text", + "content": "[18] Marcos V Conde, Zhijun Lei, Wen Li, Christos Bampis, Ioannis Katsavounidis, and Radu Timofte. Aim 2024 challenge on efficient video super-resolution for av1 compressed content. arXiv preprint arXiv:2409.17256, 2024. 30" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 320, + 511, + 555, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 511, + 555, + 567 + ], + "spans": [ + { + "bbox": [ + 320, + 511, + 555, + 567 + ], + "type": "text", + "content": "[19] Weijian Deng, Hongjie Yuan, Lunhui Deng, and Zengtong Lu. Reparameterized residual feature network for lightweight image super-resolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1712-1721, 2023. 22" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 320, + 568, + 554, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 568, + 554, + 623 + ], + "spans": [ + { + "bbox": [ + 320, + 568, + 554, + 623 + ], + "type": "text", + "content": "[20] Xiaohan Ding, Yuchen Guo, Guiguang Ding, and Jungong Han. Acnet: Strengthening the kernel skeletons for powerful cnn via asymmetric convolution blocks. 
In Proceedings of the IEEE/CVF international conference on computer vision, pages 1911-1920, 2019. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 320, + 624, + 554, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 624, + 554, + 679 + ], + "spans": [ + { + "bbox": [ + 320, + 624, + 554, + 679 + ], + "type": "text", + "content": "[21] Xiaohan Ding, Xiangyu Zhang, Jungong Han, and Guiguang Ding. Diverse branch block: Building a convolution as an inception-like unit. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10886-10895, 2021. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 320, + 681, + 554, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 681, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 320, + 681, + 554, + 713 + ], + "type": "text", + "content": "[22] Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. Repvgg: Making veg-style convnets great again. In Proceedings of the IEEE/CVF" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "44" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 43 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 73, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 81, + 73, + 294, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 73, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 81, + 73, + 294, + 95 + ], + "type": "text", + "content": "Conference on Computer Vision and Pattern Recognition, pages 13733-13742, 2021. 
6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 96, + 295, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 96, + 295, + 129 + ], + "spans": [ + { + "bbox": [ + 61, + 96, + 295, + 129 + ], + "type": "text", + "content": "[23] Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. Repvgg: Making vgg-style convnets great again. In CVPR, 2021. 9, 17" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 130, + 295, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 130, + 295, + 185 + ], + "spans": [ + { + "bbox": [ + 61, + 130, + 295, + 185 + ], + "type": "text", + "content": "[24] Jie Du, Kai Guan, Yanhong Zhou, Yuanman Li, and Tianfu Wang. Parameter-free similarity-aware attention module for medical image classification and segmentation. IEEE Transactions on Emerging Topics in Computational Intelligence, 2022. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 186, + 295, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 186, + 295, + 241 + ], + "spans": [ + { + "bbox": [ + 62, + 186, + 295, + 241 + ], + "type": "text", + "content": "[25] Zongcai Du, Ding Liu, Jie Liu, Jie Tang, Gangshan Wu, and Lean Fu. Fast and memory-efficient network towards efficient image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 853-862, 2022. 18" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 243, + 295, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 243, + 295, + 297 + ], + "spans": [ + { + "bbox": [ + 62, + 243, + 295, + 297 + ], + "type": "text", + "content": "[26] Zongcai Du, Ding Liu, Jie Liu, Jie Tang, Gangshan Wu, and Lean Fu. Fast and memory-efficient network towards efficient image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 853-862, 2022. 
36" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 299, + 294, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 299, + 294, + 331 + ], + "spans": [ + { + "bbox": [ + 62, + 299, + 294, + 331 + ], + "type": "text", + "content": "[27] Stefan Elfwing, Eiji Uchibe, and Kenji Doya. Sigmoid-weighted linear units for neural network function approximation in reinforcement learning, 2017. 15, 17" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 333, + 295, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 333, + 295, + 420 + ], + "spans": [ + { + "bbox": [ + 62, + 333, + 295, + 420 + ], + "type": "text", + "content": "[28] Egor Ershov, Sergey Korchagin, Alexei Khalin, Artyom Panshin, Arsenyi Terekhin, Ekaterina Zaychenkova, Georgiy Lobarev, Vsevolod Plokhotnyuk, Denis Abramov, Elisey Zhdanov, Sofia Dorogova, Yasin Mamedov, Nikola Banic, Georgii Perevozhikov, Radu Timofte, et al. NTIRE 2025 challenge on night photography rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 422, + 295, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 422, + 295, + 487 + ], + "spans": [ + { + "bbox": [ + 62, + 422, + 295, + 487 + ], + "type": "text", + "content": "[29] Yuqian Fu, Xingyu Qiu, Bin Ren Yanwei Fu, Radu Timofte, Nicu Sebe, Ming-Hsuan Yang, Luc Van Gool, et al. NTIRE 2025 challenge on cross-domain few-shot object detection: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 488, + 295, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 488, + 295, + 520 + ], + "spans": [ + { + "bbox": [ + 62, + 488, + 295, + 520 + ], + "type": "text", + "content": "[30] Albert Gu and Tri Dao. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752, 2023. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 523, + 295, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 523, + 295, + 576 + ], + "spans": [ + { + "bbox": [ + 62, + 523, + 295, + 576 + ], + "type": "text", + "content": "[31] Enxuan Gu, Hongwei Ge, and Yong Guo. Code: An explicit content decoupling framework for image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2920-2930, 2024. 14" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 579, + 295, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 579, + 295, + 622 + ], + "spans": [ + { + "bbox": [ + 62, + 579, + 295, + 622 + ], + "type": "text", + "content": "[32] Hang Guo, Yong Guo, Yaohua Zha, Yulun Zhang, Wenbo Li, Tao Dai, Shu-Tao Xia, and Yawei Li. Mambairv2: Attentive state space restoration. arXiv preprint arXiv:2411.15269, 2024. 6, 30, 34, 35" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 624, + 295, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 624, + 295, + 677 + ], + "spans": [ + { + "bbox": [ + 62, + 624, + 295, + 677 + ], + "type": "text", + "content": "[33] Hang Guo, Jinmin Li, Tao Dai, Zhihao Ouyang, Xudong Ren, and Shu-Tao Xia. Mambair: A simple baseline for image restoration with state-space model. In European Conference on Computer Vision, pages 222-241. Springer, 2024. 
33" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 62, + 680, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 680, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 62, + 680, + 295, + 713 + ], + "type": "text", + "content": "[34] Daniel Haase and Manuel Amthor. Rethinking depthwise separable convolutions: How intra-kernel correlations lead to improved mobilenets. In Proceedings of the IEEE/CVF" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 320, + 74, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 339, + 74, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 74, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 339, + 74, + 553, + 95 + ], + "type": "text", + "content": "conference on computer vision and pattern recognition, pages 14600-14609, 2020. 31, 37" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 321, + 96, + 553, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 96, + 553, + 140 + ], + "spans": [ + { + "bbox": [ + 321, + 96, + 553, + 140 + ], + "type": "text", + "content": "[35] Kai Han, Yunhe Wang, Qi Tian, Jianyuan Guo, Chunjing Xu, and Chang Xu. Ghostnet: More features from cheap operations. In IEEE Conf. Comput. Vis. Pattern Recog., pages 1580-1589, 2020. 19" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 320, + 141, + 553, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 141, + 553, + 207 + ], + "spans": [ + { + "bbox": [ + 320, + 141, + 553, + 207 + ], + "type": "text", + "content": "[36] Shuhao Han, Haotian Fan, Fangyuan Kong, Wenjie Liao, Chunle Guo, Chongyi Li, Radu Timofte, et al. NTIRE 2025 challenge on text to image generation model quality assessment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 320, + 209, + 553, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 209, + 553, + 262 + ], + "spans": [ + { + "bbox": [ + 320, + 209, + 553, + 262 + ], + "type": "text", + "content": "[37] Zibin He, Tao Dai, Jian Lu, Yong Jiang, and Shu-Tao Xia. Faked: Feature-affinity based knowledge distillation for efficient image super-resolution. In 2020 IEEE international conference on image processing (ICIP), pages 518-522. IEEE, 2020. 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 320, + 264, + 553, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 264, + 553, + 286 + ], + "spans": [ + { + "bbox": [ + 320, + 264, + 553, + 286 + ], + "type": "text", + "content": "[38] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 25" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 320, + 288, + 553, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 288, + 553, + 342 + ], + "spans": [ + { + "bbox": [ + 320, + 288, + 553, + 342 + ], + "type": "text", + "content": "[39] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, et al. Searching for MobileNetV3. In Proceedings of the IEEE International Conference on Computer Vision, pages 1314-1324, 2019. 26" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 320, + 344, + 553, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 344, + 553, + 399 + ], + "spans": [ + { + "bbox": [ + 320, + 344, + 553, + 399 + ], + "type": "text", + "content": "[40] Andrew G Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, and Hartwig Adam. Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861, 2017. 
37" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 320, + 399, + 553, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 399, + 553, + 442 + ], + "spans": [ + { + "bbox": [ + 320, + 399, + 553, + 442 + ], + "type": "text", + "content": "[41] Mu Hu, Junyi Feng, Jiashen Hua, Baisheng Lai, Jianqiang Huang, Xiaojin Gong, and Xian-Sheng Hua. Online convolutional re-parameterization. CoRR, abs/2204.00826, 2022. 19" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 320, + 445, + 553, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 445, + 553, + 498 + ], + "spans": [ + { + "bbox": [ + 320, + 445, + 553, + 498 + ], + "type": "text", + "content": "[42] Zhewei Huang, Tianyuan Zhang, Wen Heng, Boxin Shi, and Shuchang Zhou. Real-time intermediate flow estimation for video frame interpolation. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 6, 9" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 320, + 501, + 553, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 501, + 553, + 555 + ], + "spans": [ + { + "bbox": [ + 320, + 501, + 553, + 555 + ], + "type": "text", + "content": "[43] Zheng Hui, Xiumei Wang, and Xinbo Gao. Fast and accurate single image super-resolution via information distillation network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 723-731, 2018. 36" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 320, + 557, + 553, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 557, + 553, + 610 + ], + "spans": [ + { + "bbox": [ + 320, + 557, + 553, + 610 + ], + "type": "text", + "content": "[44] Zheng Hui, Xinbo Gao, Yunchu Yang, and Xiumei Wang. Lightweight image super-resolution with information multi-distillation network. In Proceedings of the 27th acm international conference on multimedia, pages 2024-2032, 2019. 
11" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 320, + 613, + 553, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 613, + 553, + 667 + ], + "spans": [ + { + "bbox": [ + 320, + 613, + 553, + 667 + ], + "type": "text", + "content": "[45] Zheng Hui, Xinbo Gao, Yunchu Yang, and Xiumei Wang. Lightweight image super-resolution with information multi-distillation network. In Proceedings of the 27th acm international conference on multimedia, pages 2024-2032, 2019. 10, 36" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 320, + 670, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 670, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 320, + 670, + 553, + 713 + ], + "type": "text", + "content": "[46] Pavel Izmailov, Dmitrii Podoprikhin, Timur Garipov, Dmitry Vetrov, and Andrew Gordon Wilson. Averaging weights leads to wider optima and better generalization. arXiv preprint arXiv:1803.05407, 2018. 23" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "text", + "content": "45" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 44 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 73, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 61, + 73, + 294, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 73, + 294, + 139 + ], + "spans": [ + { + "bbox": [ + 61, + 73, + 294, + 139 + ], + "type": "text", + "content": "[47] Varun Jain, Zongwei Wu, Quan Zou, Louis Florentin, Henrik Turbell, Sandeep Siddhartha, Radu Timofte, et al. 
NTIRE 2025 challenge on video quality enhancement for video conferencing: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 140, + 294, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 140, + 294, + 184 + ], + "spans": [ + { + "bbox": [ + 61, + 140, + 294, + 184 + ], + "type": "text", + "content": "[48] Yuxuan Jiang, Chen Feng, Fan Zhang, and David Bull. Mtkd: Multi-teacher knowledge distillation for image super-resolution. In European Conference on Computer Vision, pages 364–382. Springer, 2024. 30, 31" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 62, + 186, + 295, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 186, + 295, + 239 + ], + "spans": [ + { + "bbox": [ + 62, + 186, + 295, + 239 + ], + "type": "text", + "content": "[49] Yuxuan Jiang, Ho Man Kwan, Tianhao Peng, Ge Gao, Fan Zhang, Xiaqing Zhu, Joel Sole, and David Bull. HIIF: Hierarchical encoding based implicit image function for continuous super-resolution. arXiv preprint arXiv:2412.03748, 2024. 30" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 242, + 295, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 242, + 295, + 285 + ], + "spans": [ + { + "bbox": [ + 62, + 242, + 295, + 285 + ], + "type": "text", + "content": "[50] Yuxuan Jiang, Jakub Nawala, Chen Feng, Fan Zhang, Xiaogjing Zhu, Joel Sole, and David Bull. Rtsr: A real-time super-resolution model for av1 compressed content. arXiv preprint arXiv:2411.13362, 2024. 
30" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 286, + 294, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 286, + 294, + 330 + ], + "spans": [ + { + "bbox": [ + 62, + 286, + 294, + 330 + ], + "type": "text", + "content": "[51] Yuxuan Jiang, Jakub Nawala, Fan Zhang, and David Bull. Compressing deep image super-resolution models. In 2024 Picture Coding Symposium (PCS), pages 1-5. IEEE, 2024. 14, 30" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 332, + 294, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 332, + 294, + 385 + ], + "spans": [ + { + "bbox": [ + 62, + 332, + 294, + 385 + ], + "type": "text", + "content": "[52] Yuxuan Jiang, Chengxi Zeng, Siyue Teng, Fan Zhang, Xiaogjing Zhu, Joel Sole, and David Bull. C2D-ISR: Optimizing attention-based image super-resolution from continuous to discrete scales. arXiv preprint arXiv:2503.13740, 2025. 30, 31" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 388, + 294, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 388, + 294, + 420 + ], + "spans": [ + { + "bbox": [ + 62, + 388, + 294, + 420 + ], + "type": "text", + "content": "[53] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 12" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 422, + 294, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 422, + 294, + 454 + ], + "spans": [ + { + "bbox": [ + 62, + 422, + 294, + 454 + ], + "type": "text", + "content": "[54] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 
8, 14, 18, 28, 30" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 456, + 294, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 456, + 294, + 510 + ], + "spans": [ + { + "bbox": [ + 62, + 456, + 294, + 510 + ], + "type": "text", + "content": "[55] F. Kong, Mingxi Li, Songwei Liu, Ding Liu, Jingwen He, Yang Bai, Fangmin Chen, and Lean Fu. Residual local feature network for efficient super-resolution. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 765-775, 2022. 19, 22" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 513, + 294, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 513, + 294, + 567 + ], + "spans": [ + { + "bbox": [ + 62, + 513, + 294, + 567 + ], + "type": "text", + "content": "[56] Fangyuan Kong, Mingxi Li, Songwei Liu, Ding Liu, Jingwen He, Yang Bai, Fangmin Chen, and Lean Fu. Residual local feature network for efficient super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 766-776, 2022. 18, 35" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 569, + 294, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 569, + 294, + 611 + ], + "spans": [ + { + "bbox": [ + 62, + 569, + 294, + 611 + ], + "type": "text", + "content": "[57] Kin Wai Lau, Lai-Man Po, and Yasar Abbas Ur Rehman. Large separable kernel attention: Rethinking the large kernel attention design in cnn. Expert Systems with Applications, 236:121352, 2023. 28" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 614, + 294, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 614, + 294, + 689 + ], + "spans": [ + { + "bbox": [ + 62, + 614, + 294, + 689 + ], + "type": "text", + "content": "[58] Sangmin Lee, Eunpil Park, Angel Canelo, Hyunhee Park, Youngjo Kim, Hyungju Chun, Xin Jin, Chongyi Li, Chun-Le Guo, Radu Timofte, et al. 
NTIRE 2025 challenge on efficient burst hdr and restoration: Datasets, methods, and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 62, + 692, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 692, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 62, + 692, + 294, + 713 + ], + "type": "text", + "content": "[59] Xiaoyan Lei, Wenlong Zhang, and Weifeng Cao. Dvmsr: Distillated vision mamba for efficient super-resolution. In" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 321, + 73, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 339, + 73, + 553, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 73, + 553, + 106 + ], + "spans": [ + { + "bbox": [ + 339, + 73, + 553, + 106 + ], + "type": "text", + "content": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 6536-6546, 2024. 33" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 321, + 108, + 553, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 108, + 553, + 162 + ], + "spans": [ + { + "bbox": [ + 321, + 108, + 553, + 162 + ], + "type": "text", + "content": "[60] Duo Li, Jie Hu, Changhu Wang, Xiangtai Li, Qi She, Lei Zhu, Tong Zhang, and Qifeng Chen. Involution: Inverting the inheritance of convolution for visual recognition. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021. 
37" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 321, + 164, + 553, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 164, + 553, + 240 + ], + "spans": [ + { + "bbox": [ + 321, + 164, + 553, + 240 + ], + "type": "text", + "content": "[61] Xin Li, Yeying Jin, Xin Jin, Zongwei Wu, Bingchen Li, Yufei Wang, Wenhan Yang, Yu Li, Zhibo Chen, Bihan Wen, Robby Tan, Radu Timofte, et al. NTIRE 2025 challenge on day and night raindrop removal for dual-focused images: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 321, + 243, + 553, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 243, + 553, + 319 + ], + "spans": [ + { + "bbox": [ + 321, + 243, + 553, + 319 + ], + "type": "text", + "content": "[62] Xin Li, Xijun Wang, Bingchen Li, Kun Yuan, Yizhen Shao, Suhang Yao, Ming Sun, Chao Zhou, Radu Timofte, and Zhibo Chen. NTIRE 2025 challenge on short-formUGC video quality assessment and enhancement: Kwaisr dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 321, + 321, + 553, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 321, + 553, + 398 + ], + "spans": [ + { + "bbox": [ + 321, + 321, + 553, + 398 + ], + "type": "text", + "content": "[63] Xin Li, Kun Yuan, Bingchen Li, Fengbin Guan, Yizhen Shao, Zihao Yu, Xijun Wang, Yiting Lu, Wei Luo, Suhang Yao, Ming Sun, Chao Zhou, Zhibo Chen, Radu Timofte, et al. NTIRE 2025 challenge on short-formUGC video quality assessment and enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 321, + 399, + 553, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 399, + 553, + 464 + ], + "spans": [ + { + "bbox": [ + 321, + 399, + 553, + 464 + ], + "type": "text", + "content": "[64] Yawei Li, Kai Zhang, Jingyun Liang, Jiezhang Cao, Ce Liu, Rui Gong, Yulun Zhang, Hao Tang, Yun Liu, Denis Demandolx, et al. Lsdir: A large scale dataset for image restoration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2023. 2, 6, 10, 12, 14, 16, 17, 18, 19, 23, 24, 26, 28, 30, 33, 36" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 321, + 468, + 553, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 468, + 553, + 521 + ], + "spans": [ + { + "bbox": [ + 321, + 468, + 553, + 521 + ], + "type": "text", + "content": "[65] Yawei Li, Yulun Zhang, Luc Van Gool, Radu Timofte, et al. NTIRE 2023 challenge on efficient super-resolution: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2023. 15, 16" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 321, + 523, + 553, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 523, + 553, + 588 + ], + "spans": [ + { + "bbox": [ + 321, + 523, + 553, + 588 + ], + "type": "text", + "content": "[66] Zheyuan Li, Yingqi Liu, Xiangyu Chen, Haoming Cai, Jinjin Gu, Yu Qiao, and Chao Dong. Blueprint separable residual network for efficient image super-resolution. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 832-842, 2022. 
13, 26" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 321, + 591, + 553, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 591, + 553, + 646 + ], + "spans": [ + { + "bbox": [ + 321, + 591, + 553, + 646 + ], + "type": "text", + "content": "[67] Zheyuan Li, Yingqi Liu, Xiangyu Chen, Haoming Cai, Jinjin Gu, Yu Qiao, and Chao Dong. Blueprint separable residual network for efficient image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 833-843, 2022. 10, 36" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 321, + 647, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 647, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 321, + 647, + 553, + 713 + ], + "type": "text", + "content": "[68] Jie Liang, Radu Timofte, Qiaosi Yi, Zhengqiang Zhang, Shuaizheng Liu, Lingchen Sun, Rongyuan Wu, Xindong Zhang, Hui Zeng, Lei Zhang, et al. NTIRE 2025 the 2nd restore any image model (RAIM) in the wild challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 733, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 733, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 733, + 311, + 742 + ], + "type": "text", + "content": "46" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 45 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 72, + 297, + 714 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 61, + 72, + 297, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 72, + 297, + 127 + ], + "spans": [ + { + "bbox": [ + 61, + 72, + 297, + 127 + ], + "type": "text", + "content": "[69] Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Kyoung Mu Lee. Enhanced deep residual networks for single image super-resolution. In 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1132-1140, 2017. 14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 128, + 296, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 128, + 296, + 183 + ], + "spans": [ + { + "bbox": [ + 61, + 128, + 296, + 183 + ], + "type": "text", + "content": "[70] Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Young Mu Lee. Enhanced deep residual networks for single image super-resolution. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 1132-1140, 2017. 12, 17, 26, 28, 30" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 62, + 184, + 295, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 184, + 295, + 227 + ], + "spans": [ + { + "bbox": [ + 62, + 184, + 295, + 227 + ], + "type": "text", + "content": "[71] Jie Liu, Jie Tang, and Gangshan Wu. Residual feature distillation network for lightweight image super-resolution. 
In Proceedings of the European Conference on Computer Vision Workshops, pages 41-55. Springer, 2020. 10, 32, 36" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 228, + 295, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 228, + 295, + 282 + ], + "spans": [ + { + "bbox": [ + 62, + 228, + 295, + 282 + ], + "type": "text", + "content": "[72] Jie Liu, Jie Tang, and Gangshan Wu. Residual feature distillation network for lightweight image super-resolution. In Computer Vision-ECCV 2020 Workshops: Glasgow, UK, August 23-28, 2020, Proceedings, Part III 16, pages 41-55. Springer, 2020. 21" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 284, + 295, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 284, + 295, + 337 + ], + "spans": [ + { + "bbox": [ + 62, + 284, + 295, + 337 + ], + "type": "text", + "content": "[73] Jie Liu, Wenjie Zhang, Yuting Tang, Jie Tang, and Gangshan Wu. Residual feature aggregation network for image super-resolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2359-2368, 2020. 11" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 338, + 295, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 338, + 295, + 392 + ], + "spans": [ + { + "bbox": [ + 62, + 338, + 295, + 392 + ], + "type": "text", + "content": "[74] Xiaohong Liu, Xiongkuo Min, Qiang Hu, Xiaoyun Zhang, Jie Guo, et al. NTIRE 2025 XGC quality assessment challenge: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 393, + 295, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 393, + 295, + 459 + ], + "spans": [ + { + "bbox": [ + 62, + 393, + 295, + 459 + ], + "type": "text", + "content": "[75] Xiaoning Liu, Zongwei Wu, Florin-Alexandru Vasluianu, Hailong Yan, Bin Ren, Yulun Zhang, Shuhang Gu, Le Zhang, Ce Zhu, Radu Timofte, et al. NTIRE 2025 challenge on low light image enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 459, + 295, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 459, + 295, + 491 + ], + "spans": [ + { + "bbox": [ + 62, + 459, + 295, + 491 + ], + "type": "text", + "content": "[76] Zhuang Liu, Mingjie Sun, Tinghui Zhou, Gao Huang, and Trevor Darrell. Rethinking the value of network pruning. In ICLR, 2019. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 492, + 295, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 492, + 295, + 548 + ], + "spans": [ + { + "bbox": [ + 62, + 492, + 295, + 548 + ], + "type": "text", + "content": "[77] Zhaoyang Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Proceedings of the IEEE/cvf international conference on computer vision. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 12" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 548, + 295, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 548, + 295, + 569 + ], + "spans": [ + { + "bbox": [ + 62, + 548, + 295, + 569 + ], + "type": "text", + "content": "[78] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. 
In *ICLR*, 2017, 17, 29" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 570, + 295, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 570, + 295, + 624 + ], + "spans": [ + { + "bbox": [ + 62, + 570, + 295, + 624 + ], + "type": "text", + "content": "[79] Qi Ma, Yue Li, Bin Ren, Nicu Sebe, Ender Konukoglu, Theo Gevers, Luc Van Gool, and Danda Pani Paudel. Shapesplat: A large-scale dataset of gaussian splats and their self-supervised pretraining. In International Conference on 3D Vision 2025, 2024. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 625, + 295, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 625, + 295, + 691 + ], + "spans": [ + { + "bbox": [ + 62, + 625, + 295, + 691 + ], + "type": "text", + "content": "[80] Yanyu Mao, Nihao Zhang, Qian Wang, Bendu Bai, Wanying Bai, Haonan Fang, Peng Liu, Mingyue Li, and Shengbo Yan. Multi-level dispersion residual network for efficient image super-resolution. In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1660-1669, 2023. 12" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 62, + 691, + 295, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 691, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 62, + 691, + 295, + 714 + ], + "type": "text", + "content": "[81] Yanyu Mao, Nihao Zhang, Qian Wang, Bendu Bai, Wanying Bai, Haonan Fang, Peng Liu, Mingyue Li, and Shengbo" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 320, + 73, + 554, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 338, + 73, + 554, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 338, + 73, + 554, + 117 + ], + "spans": [ + { + "bbox": [ + 338, + 73, + 554, + 117 + ], + "type": "text", + "content": "Yan. 
Multi-level dispersion residual network for efficient image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 1660-1669, 2023. 10, 11, 28" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 321, + 118, + 553, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 118, + 553, + 173 + ], + "spans": [ + { + "bbox": [ + 321, + 118, + 553, + 173 + ], + "type": "text", + "content": "[82] Jakub Nawala, Yuxuan Jiang, Fan Zhang, Xiaqing Zhu, Joel Sole, and David Bull. Bvi-aom: A new training dataset for deep video compression optimization. In 2024 IEEE International Conference on Visual Communications and Image Processing (VCIP), pages 1-5. IEEE, 2024. 30" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 320, + 175, + 553, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 175, + 553, + 217 + ], + "spans": [ + { + "bbox": [ + 320, + 175, + 553, + 217 + ], + "type": "text", + "content": "[83] Ying Nie, Kai Han, Zhenhua Liu, An Xiao, Yiping Deng, Chunjing Xu, and Yunhe Wang. Ghostsr: Learning ghost features for efficient image super-resolution. CoRR, abs/2101.08525, 2021. 19" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 320, + 220, + 553, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 220, + 553, + 262 + ], + "spans": [ + { + "bbox": [ + 320, + 220, + 553, + 262 + ], + "type": "text", + "content": "[84] Seung Park, Yoon-Jae Yeo, and Yong-Goo Shin. Pconv: simple yet effective convolutional layer for generative adversarial network. Neural Computing and Applications, 34 (9):7113-7124, 2022. 
37, 38" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 321, + 264, + 553, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 264, + 553, + 308 + ], + "spans": [ + { + "bbox": [ + 321, + 264, + 553, + 308 + ], + "type": "text", + "content": "[85] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. arXiv preprint arXiv:1912.01703, 2019. 18" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 321, + 310, + 553, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 310, + 553, + 375 + ], + "spans": [ + { + "bbox": [ + 321, + 310, + 553, + 375 + ], + "type": "text", + "content": "[86] Danfeng Qin, Chas Leichner, Manolis Delakis, Marco Fornoni, Shixin Luo, Fan Yang, Weijun Wang, Colby Banbury, Chengxi Ye, Berkin Akin, Vaibhav Aggarwal, Tenghui Zhu, Daniele Moro, and Andrew Howard. Mobilenetv4 - universal models for the mobile ecosystem, 2024. 37, 38" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 321, + 377, + 553, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 377, + 553, + 421 + ], + "spans": [ + { + "bbox": [ + 321, + 377, + 553, + 421 + ], + "type": "text", + "content": "[87] Yajun Qiu, Qiang Zhu, Shuyuan Zhu, and Bing Zeng. Dual circle contrastive learning-based blind image superresolution. IEEE Transactions on Circuits and Systems for Video Technology, 34(3):1757-1771, 2023. 30" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 321, + 422, + 553, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 422, + 553, + 465 + ], + "spans": [ + { + "bbox": [ + 321, + 422, + 553, + 465 + ], + "type": "text", + "content": "[88] Yunpeng Qu, Kun Yuan, Jinhua Hao, Kai Zhao, Qizhi Xie, Ming Sun, and Chao Zhou. Visual autoregressive modeling for image super-resolution. arXiv preprint arXiv:2501.18993, 2025. 
35" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 321, + 468, + 553, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 468, + 553, + 522 + ], + "spans": [ + { + "bbox": [ + 321, + 468, + 553, + 522 + ], + "type": "text", + "content": "[89] Bin Ren, Yahui Liu, Yue Song, Wei Bi, Rita Cucchiara, Nicu Sebe, and Wei Wang. Masked jigsaw puzzle: A versatile position embedding for vision transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20382-20391, 2023. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 321, + 523, + 553, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 523, + 553, + 579 + ], + "spans": [ + { + "bbox": [ + 321, + 523, + 553, + 579 + ], + "type": "text", + "content": "[90] Bin Ren, Yawei Li, Jingyun Liang, Rakesh Ranjan, Mengyuan Liu, Rita Cucchiara, Luc V Gool, Ming-Hsuan Yang, and Nicu Sebe. Sharing key semantics in transformer makes efficient image restoration. Advances in Neural Information Processing Systems, 37:7427-7463, 2024. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 321, + 580, + 553, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 580, + 553, + 656 + ], + "spans": [ + { + "bbox": [ + 321, + 580, + 553, + 656 + ], + "type": "text", + "content": "[91] Bin Ren, Yawei Li, Nancy Mehta, Radu Timofte, Hongyuan Yu, Cheng Wan, Yuxin Hong, Bingnan Han, Zhuoyuan Wu, Yajun Zou, et al. The ninth nitire 2024 efficient super-resolution challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6595-6631, 2024. 
2, 3, 4, 6, 17, 21, 35, 38" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 321, + 658, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 658, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 321, + 658, + 553, + 713 + ], + "type": "text", + "content": "[92] Bin Ren, Hang Guo, Lei Sun, Zongwei Wu, Radu Timofte, Yawei Li, et al. The tenth NTIRE 2025 efficient super-resolution challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "text", + "content": "47" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 46 + }, + { + "para_blocks": [ + { + "bbox": [ + 57, + 73, + 294, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 61, + 73, + 294, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 73, + 294, + 139 + ], + "spans": [ + { + "bbox": [ + 61, + 73, + 294, + 139 + ], + "type": "text", + "content": "[93] Nickolay Safonov, Alexey Bryntsev, Andrey Moskalenko, Dmitry Kulikov, Dmitriy Vatolin, Radu Timofte, et al. NTIRE 2025 challenge on UGC video enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 140, + 294, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 140, + 294, + 205 + ], + "spans": [ + { + "bbox": [ + 61, + 140, + 294, + 205 + ], + "type": "text", + "content": "[94] Wenzhe Shi, Jose Caballero, Ferenc Huszár, Johannes Totz, Andrew P Aitken, Rob Bishop, Daniel Rueckert, and Zehan Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1874-1883, 2016. 25" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 62, + 206, + 294, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 206, + 294, + 249 + ], + "spans": [ + { + "bbox": [ + 62, + 206, + 294, + 249 + ], + "type": "text", + "content": "[95] Long Sun, Jinshan Pan, and Jinhui Tang. Shufflemixer: An efficient convnet for image super-resolution. Advances in Neural Information Processing Systems, 35:17314-17326, 2022. 29" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 250, + 294, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 250, + 294, + 282 + ], + "spans": [ + { + "bbox": [ + 62, + 250, + 294, + 282 + ], + "type": "text", + "content": "[96] Long Sun, Jiangxin Dong, Jinhui Tang, and Jinshan Pan. Spatially-adaptive feature modulation for efficient image super-resolution. In ICCV, 2023. 17" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 284, + 294, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 284, + 294, + 348 + ], + "spans": [ + { + "bbox": [ + 62, + 284, + 294, + 348 + ], + "type": "text", + "content": "[97] Lei Sun, Andrea Alfarano, Peiqi Duan, Shaolin Su, Kaiwei Wang, Boxin Shi, Radu Timofte, Danda Pani Paudel, Luc Van Gool, et al. NTIRE 2025 challenge on event-based image deblurring: Methods and results. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 350, + 294, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 350, + 294, + 403 + ], + "spans": [ + { + "bbox": [ + 62, + 350, + 294, + 403 + ], + "type": "text", + "content": "[98] Lei Sun, Hang Guo, Bin Ren, Luc Van Gool, Radu Timofte, Yawei Li, et al. The tenth ntire 2025 image denoising challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 405, + 294, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 405, + 294, + 470 + ], + "spans": [ + { + "bbox": [ + 62, + 405, + 294, + 470 + ], + "type": "text", + "content": "[99] Yunlong Tang, Junjia Guo, Pinxin Liu, Zhiyuan Wang, Hang Hua, Jia-Xing Zhong, Yunzhong Xiao, Chao Huang, Luchuan Song, Susan Liang, Yizhi Song, Liu He, Jing Bi, Mingqian Feng, Xinyang Li, Zeliang Zhang, and Chen-liang Xu. Generative ai for cel-animation: A survey. arXiv preprint arXiv:2501.06250, 2025. 14" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 57, + 471, + 294, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 471, + 294, + 514 + ], + "spans": [ + { + "bbox": [ + 57, + 471, + 294, + 514 + ], + "type": "text", + "content": "[100] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In CVPR Workshops, 2017. 
10, 17" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 57, + 515, + 294, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 515, + 294, + 569 + ], + "spans": [ + { + "bbox": [ + 57, + 515, + 294, + 569 + ], + "type": "text", + "content": "[101] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 114-125, 2017. 23, 33" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 57, + 571, + 294, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 571, + 294, + 613 + ], + "spans": [ + { + "bbox": [ + 57, + 571, + 294, + 613 + ], + "type": "text", + "content": "[102] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, and Lei Zhang. Ntire 2017 challenge on single image super-resolution: Methods and results. In CVPR workshops, pages 114-125, 2017. 12, 30" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 615, + 294, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 615, + 294, + 658 + ], + "spans": [ + { + "bbox": [ + 57, + 615, + 294, + 658 + ], + "type": "text", + "content": "[103] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, Lei Zhang, et al. NTIRE 2017 challenge on single image super-resolution: Methods and results. In CVPR Workshops, 2017. 17, 28" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 658, + 294, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 658, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 57, + 658, + 294, + 712 + ], + "type": "text", + "content": "[104] Radu Timofte, Eirikur Agustsson, Shuhang Gu, J Wu, A Ignatov, and L Van Gool. 
Div2k dataset: Diverse 2k resolution high quality images as used for the challenges@ ntire (cvpr 2017 and cvpr 2018) and@ pirm (eccv 2018), 2018. 24, 36" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 316, + 73, + 554, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 554, + 128 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 554, + 128 + ], + "type": "text", + "content": "[105] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Caitian Chen, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 image shadow removal challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 317, + 129, + 555, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 129, + 555, + 184 + ], + "spans": [ + { + "bbox": [ + 317, + 129, + 555, + 184 + ], + "type": "text", + "content": "[106] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 ambient lighting normalization challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 186, + 554, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 186, + 554, + 229 + ], + "spans": [ + { + "bbox": [ + 317, + 186, + 554, + 229 + ], + "type": "text", + "content": "[107] Pavan Kumar Anasosalu Vasu, James Gabriel, Jeff Zhu, Oncel Tuzel, and Anurag Ranjan. An improved one millisecond mobile backbone. arXiv preprint arXiv:2206.04040, 2022. 
9" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 317, + 232, + 554, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 232, + 554, + 285 + ], + "spans": [ + { + "bbox": [ + 317, + 232, + 554, + 285 + ], + "type": "text", + "content": "[108] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient superresolution. arXiv preprint arXiv:2311.12770, 2023. 34, 35, 38" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 287, + 554, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 287, + 554, + 352 + ], + "spans": [ + { + "bbox": [ + 316, + 287, + 554, + 352 + ], + "type": "text", + "content": "[109] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Ya-jun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient super-resolution. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 6246-6256, 2024. 12, 13" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 354, + 554, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 354, + 554, + 409 + ], + "spans": [ + { + "bbox": [ + 316, + 354, + 554, + 409 + ], + "type": "text", + "content": "[110] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient superresolution. In IEEE Conf. Comput. Vis. Pattern Recog. Worksh., 2024. NTIRE 2024 ESR Challenge. 21" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 411, + 554, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 411, + 554, + 476 + ], + "spans": [ + { + "bbox": [ + 316, + 411, + 554, + 476 + ], + "type": "text", + "content": "[111] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. 
Swift parameter-free attention network for efficient superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6246-6256, 2024. 9, 20" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 478, + 554, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 478, + 554, + 544 + ], + "spans": [ + { + "bbox": [ + 317, + 478, + 554, + 544 + ], + "type": "text", + "content": "[112] Cheng Wan, Hongyuan Yu, Zhiqi Li, Yihang Chen, Yajun Zou, Yuqing Liu, Xuanwu Yin, and Kunlong Zuo. Swift parameter-free attention network for efficient superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2024. 7, 8, 14, 20, 21, 23, 24, 26, 33, 36" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 545, + 554, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 545, + 554, + 599 + ], + "spans": [ + { + "bbox": [ + 317, + 545, + 554, + 599 + ], + "type": "text", + "content": "[113] Hang Wang, Xuanhong Chen, Bingbing Ni, Yutian Liu, and Jinfan Liu. Omni aggregation networks for lightweight image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22378-22387, 2023. 17" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 601, + 554, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 601, + 554, + 667 + ], + "spans": [ + { + "bbox": [ + 317, + 601, + 554, + 667 + ], + "type": "text", + "content": "[114] Hongyuan Wang, Ziyan Wei, Qingting Tang, Shuli Cheng, Liejun Wang, and Yongming Li. Attention guidance distillation network for efficient image super-resolution. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 6287-6296, 2024. 
12, 13, 28" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 669, + 554, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 669, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 669, + 554, + 713 + ], + "type": "text", + "content": "[115] Xintao Wang, Liangbin Xie, Ke Yu, Kelvin C.K. Chan, Chen Change Loy, and Chao Dong. BasicSR: Open source image and video restoration toolbox. https://github.com/XPixelGroup/BasicSR, 2022.29" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 310, + 741 + ], + "type": "text", + "content": "48" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 47 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 73, + 297, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 56, + 73, + 297, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 297, + 117 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 297, + 117 + ], + "type": "text", + "content": "[116] Yan Wang. Edge-enhanced feature distillation network for efficient super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 777-785, 2022. 2, 3, 4, 18, 38" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 117, + 297, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 117, + 297, + 138 + ], + "spans": [ + { + "bbox": [ + 55, + 117, + 297, + 138 + ], + "type": "text", + "content": "[117] Yan Wang. Edge-enhanced feature distillation network for efficient super-resolution, 2022. 
37" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 140, + 296, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 140, + 296, + 183 + ], + "spans": [ + { + "bbox": [ + 56, + 140, + 296, + 183 + ], + "type": "text", + "content": "[118] Yucong Wang and Minjie Cai. A single residual network with eta modules and distillation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1970-1980, 2023. 18" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 184, + 296, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 184, + 296, + 239 + ], + "spans": [ + { + "bbox": [ + 56, + 184, + 296, + 239 + ], + "type": "text", + "content": "[119] Yan Wang, Yusen Li, Gang Wang, and Xiaoguang Liu. Multi-scale attention network for single image superresolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2024. 28" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 239, + 296, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 239, + 296, + 272 + ], + "spans": [ + { + "bbox": [ + 56, + 239, + 296, + 272 + ], + "type": "text", + "content": "[120] Yan Wang, Yusen Li, Gang Wang, and Xiaoguang Liu. Pla-nusr: Chasing faster convnet for efficient super-resolution. arXiv preprint arXiv:2409.13435, 2024. 26" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 272, + 296, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 272, + 296, + 338 + ], + "spans": [ + { + "bbox": [ + 56, + 272, + 296, + 338 + ], + "type": "text", + "content": "[121] Yingqian Wang, Zhengyu Liang, Fengyuan Zhang, Lvli Tian, Longguang Wang, Juncheng Li, Jungang Yang, Radu Timofte, Yulan Guo, et al. NTIRE 2025 challenge on light field image super-resolution: Methods and results. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 338, + 296, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 338, + 296, + 381 + ], + "spans": [ + { + "bbox": [ + 56, + 338, + 296, + 381 + ], + "type": "text", + "content": "[122] Gang Wu, Junjun Jiang, Junpeng Jiang, and Xianming Liu. Transforming image super-resolution: A convformer-based efficient approach. IEEE Transactions on Image Processing, 2024. 27, 28" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 383, + 296, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 383, + 296, + 437 + ], + "spans": [ + { + "bbox": [ + 56, + 383, + 296, + 437 + ], + "type": "text", + "content": "[123] Chengxing Xie, Xiaoming Zhang, Linze Li, Yuqian Fu, Biao Gong, Tianrui Li, and Kai Zhang. Mat: Multi-range attention transformer for efficient image super-resolution. IEEE Transactions on Circuits and Systems for Video Technology, 2025. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 437, + 296, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 437, + 296, + 491 + ], + "spans": [ + { + "bbox": [ + 56, + 437, + 296, + 491 + ], + "type": "text", + "content": "[124] Xingyu Xie, Pan Zhou, Huan Li, Zhouchen Lin, and Shuicheng Yan. Adan: Adaptive nesterov momentum algorithm for faster optimizing deep models. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 
26" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 492, + 297, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 492, + 297, + 569 + ], + "spans": [ + { + "bbox": [ + 56, + 492, + 297, + 569 + ], + "type": "text", + "content": "[125] Kangning Yang, Jie Cai, Ling Ouyang, Florin-Alexandru Vasluianu, Radu Timofte, Jiaming Ding, Huiming Sun, Lan Fu, Jinlong Li, Chiu Man Ho, Zibo Meng, et al. NTIRE 2025 challenge on single image reflection removal in the wild: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 570, + 296, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 570, + 296, + 613 + ], + "spans": [ + { + "bbox": [ + 56, + 570, + 296, + 613 + ], + "type": "text", + "content": "[126] Lingxiao Yang, Ru-Yuan Zhang, Lida Li, and Xiaohua Xie. Simam: A simple, parameter-free attention module for convolutional neural networks. In International conference on machine learning, pages 11863-11874. PMLR, 2021. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 614, + 296, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 614, + 296, + 680 + ], + "spans": [ + { + "bbox": [ + 56, + 614, + 296, + 680 + ], + "type": "text", + "content": "[127] Kihwan Yoon, Ganzorig Gankhuyag, Jinman Park, Haengseon Son, and Kyoungwon Min. Casr: Efficient cascade network structure with channel aligned method for 4k real-time single image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7911-7920, 2024. 
21" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 681, + 296, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 681, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 681, + 296, + 713 + ], + "type": "text", + "content": "[128] Lei Yu, Xinpeng Li, Youwei Li, Ting Jiang, Qi Wu, Haoqiang Fan, and Shuaicheng Liu. Dipnet: Efficiency distillation and iterative pruning for image super-resolution. In" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 315, + 73, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 339, + 73, + 555, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 73, + 555, + 105 + ], + "spans": [ + { + "bbox": [ + 339, + 73, + 555, + 105 + ], + "type": "text", + "content": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1692-1701, 2023. 15, 16" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 107, + 555, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 107, + 555, + 161 + ], + "spans": [ + { + "bbox": [ + 315, + 107, + 555, + 161 + ], + "type": "text", + "content": "[129] Xiyu Yu, Tongliang Liu, Xinchao Wang, and Dacheng Tao. On compressing deep models by low rank and sparse decomposition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7370-7379, 2017. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 163, + 555, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 163, + 555, + 239 + ], + "spans": [ + { + "bbox": [ + 315, + 163, + 555, + 239 + ], + "type": "text", + "content": "[130] Pierluigi Zama Ramirez, Fabio Tosi, Luigi Di Stefano, Radu Timofte, Alex Costanzino, Matteo Poggi, Samuele Salti, Stefano Mattoccia, et al. NTIRE 2025 challenge on hr depth from images of specular and transparent surfaces. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 240, + 554, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 240, + 554, + 285 + ], + "spans": [ + { + "bbox": [ + 315, + 240, + 554, + 285 + ], + "type": "text", + "content": "[131] Eduard Zamfir, Zongwei Wu, Nancy Mehta, Yulun Zhang, and Radu Timofte. See more details: Efficient image superresolution by experts mining. In *Forty-first International Conference on Machine Learning*, 2024. 29" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 285, + 554, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 285, + 554, + 329 + ], + "spans": [ + { + "bbox": [ + 315, + 285, + 554, + 329 + ], + "type": "text", + "content": "[132] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, and Ming-Hsuan Yang. Restormer: Efficient transformer for high-resolution image restoration. In CVPR, 2022. 10, 28" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 331, + 554, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 331, + 554, + 374 + ], + "spans": [ + { + "bbox": [ + 315, + 331, + 554, + 374 + ], + "type": "text", + "content": "[133] Dafeng Zhang, Feiyu Huang, Shizhuo Liu, Xiaobing Wang, and Zhezhu Jin. Swinfir: Revisiting the swinir with fast fourier convolution and improved training for image super-resolution, 2022. 14" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 376, + 554, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 376, + 554, + 409 + ], + "spans": [ + { + "bbox": [ + 315, + 376, + 554, + 409 + ], + "type": "text", + "content": "[134] Xiang Zhang. Hit-sr: Hierarchical transformer for efficient image super-resolution. https://github.com/XiangZ-0/HiT-SR, 2024. GitHub repository. 
33" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 410, + 554, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 410, + 554, + 454 + ], + "spans": [ + { + "bbox": [ + 315, + 410, + 554, + 454 + ], + "type": "text", + "content": "[135] Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, and Jian Sun. Shufflenet: An extremely efficient convolutional neural network for mobile devices. Proceedings of the IEEE conference on computer vision and pattern recognition, 2018. 37" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 455, + 554, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 455, + 554, + 510 + ], + "spans": [ + { + "bbox": [ + 315, + 455, + 554, + 510 + ], + "type": "text", + "content": "[136] Xindong Zhang, Hui Zeng, and Lei Zhang. Edge-oriented convolution block for real-time super resolution on mobile devices. In MM '21: ACM Multimedia Conference, Virtual Event, China, October 20 - 24, 2021, pages 4034-4043. ACM, 2021. 19" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 510, + 554, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 510, + 554, + 555 + ], + "spans": [ + { + "bbox": [ + 315, + 510, + 554, + 555 + ], + "type": "text", + "content": "[137] Xindong Zhang, Huiyu Zeng, and Lei Zhang. Edge-oriented convolution block for real-time super resolution on mobile devices. Proceedings of the 29th ACM International Conference on Multimedia, 2021. 19" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 315, + 555, + 554, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 555, + 554, + 600 + ], + "spans": [ + { + "bbox": [ + 315, + 555, + 554, + 600 + ], + "type": "text", + "content": "[138] Xindong Zhang, Hui Zeng, and Lei Zhang. Edge-oriented convolution block for real-time super resolution on mobile devices. 
In Proceedings of the 29th ACM International Conference on Multimedia, pages 4034-4043, 2021. 3, 21" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 315, + 601, + 554, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 601, + 554, + 645 + ], + "spans": [ + { + "bbox": [ + 315, + 601, + 554, + 645 + ], + "type": "text", + "content": "[139] Xiang Zhang, Yulun Zhang, and Fisher Yu. Hit-sr: Hierarchical transformer for efficient image super-resolution. In European Conference on Computer Vision, pages 483-500. Springer, 2024. 30" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 315, + 646, + 554, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 646, + 554, + 679 + ], + "spans": [ + { + "bbox": [ + 315, + 646, + 554, + 679 + ], + "type": "text", + "content": "[140] Xiang Zhang, Yulun Zhang, and Fisher Yu. Hit-sr: Hierarchical transformer for efficient image super-resolution. arXiv preprint, arXiv:2407.05878, 2024. 33" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 315, + 680, + 555, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 680, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 315, + 680, + 555, + 713 + ], + "type": "text", + "content": "[141] Yulun Zhang, Kai Zhang, Zheng Chen, Yawei Li, Radu Timofte, et al. NTIRE 2023 challenge on image superresolution (x4): Methods and results. 
In Proceedings of" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "49" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 48 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 295, + 518 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 81, + 73, + 294, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 73, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 81, + 73, + 294, + 95 + ], + "type": "text", + "content": "the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2023. 30" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 96, + 295, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 96, + 295, + 140 + ], + "spans": [ + { + "bbox": [ + 56, + 96, + 295, + 140 + ], + "type": "text", + "content": "[142] Hengyuan Zhao, Xiangtao Kong, Jingwen He, Yu Qiao, and Chao Dong. Efficient image super-resolution using pixel attention. In European Conference on Computer Vision, pages 56-72. Springer, 2020. 26" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 141, + 294, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 141, + 294, + 206 + ], + "spans": [ + { + "bbox": [ + 56, + 141, + 294, + 206 + ], + "type": "text", + "content": "[143] Mengyi Zhao, Mengyuan Liu, Bin Ren, Shuling Dai, and Nicu Sebe. Denoising diffusion probabilistic models for action-conditioned 3d motion generation. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 4225-4229. IEEE, 2024. 
2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 208, + 294, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 208, + 294, + 251 + ], + "spans": [ + { + "bbox": [ + 56, + 208, + 294, + 251 + ], + "type": "text", + "content": "[144] Mingjun Zheng, Long Sun, Jiangxin Dong, and Jinshan Pan. Smfanet: A lightweight self-modulation feature aggregation network for efficient image super-resolution. In ECCV, 2024. 10, 17, 28" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 253, + 294, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 253, + 294, + 308 + ], + "spans": [ + { + "bbox": [ + 56, + 253, + 294, + 308 + ], + "type": "text", + "content": "[145] Mingjun Zheng, Long Sun, Jiangxin Dong, and Jinshan Pan. Smfanet: A lightweight self-modulation feature aggregation network for efficient image super-resolution. In European Conference on Computer Vision, pages 359-375. Springer, 2024. 29" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 308, + 294, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 308, + 294, + 351 + ], + "spans": [ + { + "bbox": [ + 56, + 308, + 294, + 351 + ], + "type": "text", + "content": "[146] Xu Zheng, Yunhao Luo, Pengyuan Zhou, and Lin Wang. Distilling efficient vision transformers from cnns for semantic segmentation. Pattern Recognition, 158:111029, 2025. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 353, + 294, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 353, + 294, + 407 + ], + "spans": [ + { + "bbox": [ + 56, + 353, + 294, + 407 + ], + "type": "text", + "content": "[147] Yupeng Zhou, Zhen Li, Chun-Le Guo, Song Bai, Ming-Ming Cheng, and Qibin Hou. Srformer: Permuted self-attention for single image super-resolution. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12780–12791, 2023. 
30, 33, 34" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 409, + 294, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 409, + 294, + 463 + ], + "spans": [ + { + "bbox": [ + 56, + 409, + 294, + 463 + ], + "type": "text", + "content": "[148] Lianghui Zhu, Bencheng Liao, Qian Zhang, Xinlong Wang, Wenyu Liu, and Xinggang Wang. Vision mamba: Efficient visual representation learning with bidirectional state space model. In *Forty-first International Conference on Machine Learning*, 2024. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 464, + 294, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 464, + 294, + 518 + ], + "spans": [ + { + "bbox": [ + 56, + 464, + 294, + 518 + ], + "type": "text", + "content": "[149] Qiang Zhu, Pengfei Li, and Qianhui Li. Attention retractable frequency fusion transformer for image super resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1756-1763, 2023. 
30" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 299, + 732, + 311, + 742 + ], + "type": "text", + "content": "50" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 49 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10861/a4e0028e-483e-4013-a80e-4d616bb12d80_content_list.json b/data/2025/2504_10xxx/2504.10861/a4e0028e-483e-4013-a80e-4d616bb12d80_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..8b2c4a88f8db120e7c4722e75505ebede5543be5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/a4e0028e-483e-4013-a80e-4d616bb12d80_content_list.json @@ -0,0 +1,1562 @@ +[ + { + "type": "text", + "text": "$\\diamond$ Ai2 Scholar QA: Organized Literature Synthesis with Attribution", + "text_level": 1, + "bbox": [ + 132, + 93, + 863, + 115 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Amanpreet Singh* Joseph Chee Chang* Chloe Anastasiades* Dany Haddad* Aakanksha Naik Amber Tanaka Angele Zamarron Cecile Nguyen Jena D. Hwang Jason Dunkleberger Matt Latzke Smita Rao Jaron Lochner Rob Evans Rodney Kinney Daniel S. 
Weld Doug Downey* Sergey Feldman*", + "text_level": 1, + "bbox": [ + 119, + 130, + 863, + 197 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Allen Institute for AI", + "bbox": [ + 411, + 200, + 584, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{amanpreets, sergey}@allenai.org", + "bbox": [ + 376, + 219, + 620, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 260, + 260, + 339, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Retrieval-augmented generation is increasingly effective in answering scientific questions from literature, but many state-of-the-art systems are expensive and closed-source. We introduce Ai2 Scholar QA, a free online scientific question answering application. To facilitate research, we make our entire pipeline public: as a customizable open-source Python package1 and interactive web app, along with paper indexes accessible through public APIs and downloadable datasets. We describe our system in detail and present experiments analyzing its key design decisions. 
In an evaluation on a recent scientific QA benchmark, we find that Ai2 Scholar QA outperforms competing systems.", + "bbox": [ + 141, + 286, + 460, + 500 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/57dca823c8b7ae7cfba9806c2ff0b51209622a04e30dde754ec83a334852b024.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 152, + 502, + 176, + 518 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/dbabd07881d038bbba4ce14a7b5fd344bdba0571ccbedb609f2101aa9a734980.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 152, + 519, + 174, + 533 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b489c9848f844e933df64a0bc9c22d644c9e8218729b4c01c657c8dd7c272c9f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 154, + 536, + 174, + 548 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/949e3a4965caf39889b4cadbd454b66dcf6db9c4bb79bec0c563cbfa0d7df351.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 154, + 550, + 174, + 562 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "qa.allen.ai", + "allenai/ai2-scholarqa-lib", + "Demo Video", + "Python Package" + ], + "bbox": [ + 193, + 508, + 384, + 564 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 114, + 573, + 258, + 588 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Long-form scientific question answering systems use retrieval-augmented generation (RAG) (Lewis et al., 2020) over scientific literature to answer complex questions. These systems produce responses that bring together relevant insights from dozens of papers to help users rapidly learn about a body of scientific work. 
Examples are OpenScholar (Asai et al., 2024), Elicit, Consensus, and others §5.", + "bbox": [ + 112, + 598, + 487, + 726 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Most of these systems are expensive to use and closed source, relying on models, workflows, and retrieval solutions not shared publicly. These issues create barriers for researchers who wish to study or build on the work. In response, we introduce Ai2 Scholar QA, a free-to-use scientific QA system (qa.allen.ai), and share our key components as open source software and public APIs.", + "bbox": [ + 112, + 727, + 485, + 854 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Scholar QA follows a multi-stage pipeline (Figure 1) that starts by querying paper indexes: one", + "bbox": [ + 112, + 854, + 487, + 888 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "from Semantic Scholar with over 100M abstracts, and a new index that we introduce in this work containing 11.7M full-text scientific papers. The pipeline then re-ranks the retrieved passages with a cross-encoder, and finally prompts a Large Language Model (LLM) to filter, cluster, and synthesize the passages into an answer. The final answer is presented to the user in a report with expandable sections of prose, bulleted lists, and tables. Claims in the answer are supported by citations, which can be clicked to reveal the cited paper's title and authors (with links to their corresponding Semantic Scholar pages), and in many cases relevant excerpt(s) from the paper, allowing for quick verification of the claim.", + "bbox": [ + 507, + 261, + 884, + 501 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The system is based on open source code, enabling the community to reproduce and build on it. We release the code for our pipeline, prompting workflow and Web application. 
The retrieval indexes, including the new full text search index, are available as Semantic Scholar APIs and dataset downloads, and are continually updated with new articles (Kinney et al., 2023). Together, these resources can be combined with any generative LLM API to power a complete long-form scientific QA application. Our production system currently uses Anthropic's Claude 3.7 (Anthropic, 2024).", + "bbox": [ + 507, + 502, + 884, + 696 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We present analyses that justify key design decisions in our architecture in §4. Our choice of retrieval models and configuration is informed by evaluation over a collection of real and synthetic user queries and accompanying passages judged for relevance by a LLM, both of which we release publicly. We compare Scholar QA's answers against several baselines, demonstrating that it achieves state-of-the-art performance on the ScholarQA-CS benchmark (Asai et al., 2024). Finally, we discuss the reception of Scholar QA by users. 
The strong majority $(85\\%)$ of user feedback is positive, and the reported issues suggest important improvements for future work.", + "bbox": [ + 507, + 696, + 882, + 920 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10861v2 [cs.CL] 28 Jul 2025", + "bbox": [ + 21, + 294, + 60, + 703 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Core contributors", + "bbox": [ + 136, + 894, + 260, + 904 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1We use closed state-of-the-art LLMs.", + "bbox": [ + 136, + 906, + 369, + 919 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b5df53f86fb06f99e0f4351fefeea2efe4c0a077e930e6c8769c098085df09e0.jpg", + "image_caption": [ + "Figure 1: Scholar QA Pipeline Overview" + ], + "image_footnote": [], + "bbox": [ + 115, + 80, + 884, + 200 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Pipeline", + "text_level": 1, + "bbox": [ + 112, + 230, + 220, + 247 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The Scholar QA architecture (Figure 1) has three primary components: 1) retrieval to identify relevant passages from a corpus of scientific literature; 2) a neural cross-encoder that re-ranks the passages to select the most relevant top-k; and 3) multi-step LLM generation to process the passages into a comprehensive report. Next, we describe each component of the pipeline in detail.", + "bbox": [ + 112, + 250, + 489, + 380 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Query Validation. 
Prior to processing a query, we employ OpenAI's omni-moderation-latest $^2$ model for safeguarding against potentially harmful content and return appropriate error messages.", + "bbox": [ + 112, + 382, + 489, + 448 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Retrieval", + "text_level": 1, + "bbox": [ + 112, + 457, + 233, + 472 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We use the Semantic Scholar API (Kinney et al., 2023) for retrieval, specifically its endpoint for keyword search over paper abstracts, and our new endpoint for querying snippets from open-access papers. A query decomposer re-formulates the user query for each endpoint and retrieves up to 256 snippets and 20 abstracts. These texts are referred to as \"passages\" below.", + "bbox": [ + 112, + 478, + 489, + 607 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Query Decomposer. The two retrieval endpoints differ in their effective query formats (one targets keyword and the other semantic queries) and filtering of results based on the user's preferences for paper metadata (paper year, venue, field of study). In our query decomposition step, an LLM is prompted to re-format the user query into paraphrases appropriate for each endpoint, and to extract the user's requested settings for the metadata filters. We use the outputs of this step for retrieval.", + "bbox": [ + 112, + 609, + 489, + 772 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Search APIs. The Semantic Scholar keyword search API is described in Kinney et al. (2023). We introduce a new /snippet/search endpoint, which searches over a corpus of passages extracted from S2ORC (Lo et al., 2020), loaded into a Vespa cluster with papers and passages. Papers include metadata for filtering. Passages are derived from a pa", + "bbox": [ + 112, + 774, + 490, + 888 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "per's title, abstract, or body and can be filtered at the paper level. 
The index includes 11.7M full-text papers across the fields of study listed here, and a total of 285.6M passages.", + "bbox": [ + 507, + 230, + 884, + 294 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Each passage is limited to 480 tokens and truncated at sentence and section boundaries where possible, having an overlap of one sentence (up to 64 tokens) with the preceding and following passages. Passage text is embedded with mxbai-embed-large-v1 (Lee et al., 2024) with binary quantization, and placed into a dense (approximate nearest neighbor) index, as well as a traditional sparse keyword index.", + "bbox": [ + 507, + 296, + 884, + 439 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We first retrieve a union of embedding and keyword-based matches, applying any specified filters. The filtered results are ranked with a weighted sum of embedding similarity and bm25 scores.", + "bbox": [ + 507, + 442, + 882, + 506 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2 Reranking", + "text_level": 1, + "bbox": [ + 507, + 520, + 640, + 536 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The passages obtained from the retrieval step are subsequently passed to a neural re-ranker and the top 50 results are retained. The re-ranker is a cross-encoder that encodes both the query and a candidate document simultaneously and outputs a relevance score used to rank the documents. 
We selected mxbai-erank-large-v1 (Shakir et al., 2024) based on the results in §4.2 and host it on Modal with a single NVIDIA L40S GPU.", + "bbox": [ + 507, + 542, + 882, + 688 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.3 Multi-step Generation", + "text_level": 1, + "bbox": [ + 507, + 701, + 732, + 717 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The generation phase employs a three-step approach: first, the retrieved passages are processed to extract more precise quotes relevant to the query; second, the quotes are thematically clustered into separate sections appropriate for the answer; finally, a controlled generation process composes the final report one section at a time, synthesizing the quotes assigned to that section.", + "bbox": [ + 507, + 724, + 884, + 852 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Quote extraction. Passages from the retrieval stage can be lengthy and may contain extraneous information not useful for answering the user query (Asai et al., 2023). The quote extraction stage aims", + "bbox": [ + 507, + 857, + 882, + 921 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "2https://platform.openai.com/docs/guides/moderation", + "bbox": [ + 112, + 892, + 448, + 919 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "to select only the most relevant quotes from the passages to improve the precision of the answer.", + "bbox": [ + 112, + 84, + 485, + 116 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We instruct an LLM to extract verbatim quotes that directly contribute to answering the query (Slobodkin et al., 2024). As input to the extraction, we gather all passages from the re-ranker for a given paper, and concatenate these to the abstract of the paper. This aggregation helps create a richer context conducive to extracting relevant quotes. The LLM processes each paper's content independently and returns the selected quotes separated by ellipses. 
If the entire paper context is deemed irrelevant, it is discarded from further processing.", + "bbox": [ + 112, + 118, + 489, + 297 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Answer Outline and Clustering. For generating a comprehensive research report, the effective organization of reference materials is essential for its overall coherence. We propose a thematic outline framework where the answer is divided into sections representing topics, and the reference quotes are assigned to these topics. This mapping allows the system to selectively focus only on the pertinent subset of quotes when synthesizing a section.", + "bbox": [ + 112, + 300, + 489, + 447 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "First, the LLM is instructed to generate a list of themes in logical order and the appropriate synthesis format for each theme, independent of the quotes from the previous step. The first section is always an introduction or background to provide the user the basics for understanding the answer. The format of each section can be either a paragraph or a bulleted list, serving different information needs. Paragraphs convey nuanced summaries from multiple papers, while bulleted lists enumerate related papers (e.g., models, datasets, or interactive systems). These list are also the catalyst for generating the comparison tables (see §2.3). Following this, the sections are assigned 0 or more quotes. In case no quote is assigned to a section, it is generated completely from the LLM weights.", + "bbox": [ + 112, + 449, + 489, + 706 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Report Generation. With the answer outline in place, each section of the report is synthesized serially conditioned on the query, reference sources, and the sections prior to it. The LLM is also instructed to generate a TLDR for each section. The references are either the quotes assigned to the section or abstracts of papers that are cited within these quotes. 
This citation following method allows the LLM to condition on and cite foundational sources which are not uncovered in retrieval. The LLM is instructed to cite the sources for each claim in the generated section text and cite generations from its parameters as LLM Memory.", + "bbox": [ + 112, + 712, + 489, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Paper Comparison Table Generation. Since bulleted list sections typically include closely related papers (e.g., different datasets), we additionally generate tables that compare and contrast all papers cited in that section using common aspects (e.g., size and annotation method). This pipeline is detailed in Newman et al. (2024). At a high level, the inputs are the query to Scholar QA, the section title, and the abstracts of all papers cited in the section. An LLM first produces a set of common aspects (columns) to compare papers (rows). Each cell (paper-aspect pair) is filled with a value using the full-text of the paper. Finally, as not all aspects are applicable to every paper (e.g., one paper might not be about a dataset), we filter out columns and rows with a high proportion of missing values. Figure 3 [A] shows an expanded table in Scholar QA where related papers from a section are compared across a set of common aspects ([B]).", + "bbox": [ + 507, + 84, + 884, + 391 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Scholar QA: Interface and Source Code", + "text_level": 1, + "bbox": [ + 507, + 405, + 882, + 420 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Scholar QA is open-sourced as an extensible Python package (ai2-scholar-qa) and a Typescript and React-based interactive web application. The LLM functionality of Scholar QA is implemented with litellm, which supports swapping a variety of models using your own keys. Thus, the community can build upon Scholar QA and easily visualize the results (examples in Appendix A). 
Below we describe the user experience of the demo.3", + "bbox": [ + 507, + 432, + 884, + 577 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Progress and Section Streaming. High system latency can hinder usability. On average, Scholar QA produces a full report in 2.5 minutes (N=500, $\\sigma = 70\\mathrm{s}$ ), which is comparable to modern LLM-based research tools. To further improve usability, the following designs were used: 1) Displaying detailed real-time progress of the system (Nielsen, 1994) so users can examine the number of papers, passages, and sections being processed. 2) Presenting each section as soon as it is generated, so users can begin browsing the first section in 50 seconds (N=500, $\\sigma = 24\\mathrm{s}$ ) post issuing a query (Appendix H).", + "bbox": [ + 507, + 593, + 885, + 787 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Expandable Sections. By default, sections are collapsed showing only their titles, TLDR summaries, and number of cited sources. This gives users a gist of the information included in the report (Figure 2 [A]). Users can then click on the title of a section they wish to read to expand it ([B]).", + "bbox": [ + 507, + 797, + 884, + 894 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3Our production system has a few additional features like downloadable reports, login and links to other Ai2 systems.", + "bbox": [ + 507, + 894, + 882, + 921 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/c9a8ba26e2c2e5a83296e69aea82c8dcfe28e652909eb1832eb214e2af5358ec.jpg", + "image_caption": [ + "Figure 2: Multi-section [B] report generated by Scholar QA. References are linked to supporting excerpts [C]. Thumbs and free text feedback are collected for the full report [A], and also for each section and inline table." + ], + "image_footnote": [], + "bbox": [ + 152, + 80, + 842, + 517 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "References and Evidence Excerpts. 
To verify the claims in the report, users can click on the inline citations (Figure 2 [C]) or the pink excerpt icon in the inline table cells (Figure 3 [C]) to bring up a popup paper card. From the paper card, they can see the relevant excerpts used during the generation or click on the title to open the paper directly.", + "bbox": [ + 112, + 580, + 487, + 692 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "User Feedback Collection. We collect thumbs up/down or textual feedback for the whole report (Figure 2 [A]) and at each section and inline table.", + "bbox": [ + 112, + 696, + 489, + 744 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4 Evaluation", + "text_level": 1, + "bbox": [ + 112, + 760, + 243, + 776 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1 Retrieval", + "text_level": 1, + "bbox": [ + 112, + 787, + 233, + 801 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We tuned our retrieval setup by optimizing ranking over a dev set of 500 synthetic queries (see Appendix C) and the top 1000 passages for each based on GIST embedding distance (Solatorio, 2024). We generated binary relevance labels with gpt-4-turbo (see Appendix B for the prompt), which were found to have $80\\%$ agreement with", + "bbox": [ + 112, + 809, + 489, + 921 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/b8d80b8b7ef2cc0d21f8dccb37dcca884443d727a60e15f37512e39fd2a5eff8.jpg", + "image_caption": [ + "Figure 3: Inline tables compare papers [A] with common aspects [B] with values linked to supporting excerpts from the papers [C]." + ], + "image_footnote": [], + "bbox": [ + 512, + 606, + 884, + 829 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/81b8047cfb6dc431ad8aec3f0c23b80473c05396747e736928043641fcddda80.jpg", + "image_caption": [ + "Figure 4: Embedding ranking performance for various compression methods and matryoshka cutoffs. 
The $x$ -axis indicates the size of the vector index based relative to using int8 quantization and the full embedding size. The red circle indicates the selected configuration. Embedding size is notated next to each point." + ], + "image_footnote": [], + "bbox": [ + 117, + 84, + 480, + 234 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "human annotators on a sample of 100 queries.", + "bbox": [ + 112, + 349, + 455, + 365 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Pipeline Tuning. We optimized several aspects of retrieval over this dev set: embedding model selection and quantization method for it, the components and weights in the final ensemble, and (when relevant) the target Matryoshka dimension for the embeddings (Kusupati et al., 2024).", + "bbox": [ + 112, + 370, + 487, + 464 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We experimented with medium sized embedding models based on top performers on the retriever and ranking tasks of the MTEB (Muennighoff et al., 2022) leaderboard on HuggingFace. Table 4 in Appendix D lists our candidate models. The mxbai-embed-large-v1 (Lee et al., 2024) embeddings performed best over our dev set. Figure 4 validates our choice of quantization method and target Matryoshka dimension for these embeddings. We chose unary quantization with no Matryoshka truncation, (indicated by a red circle on the plot) since it satisfied our storage constraints without a large drop in performance. We experimented with assembling SparseEmbed (Kong et al., 2023), embedding cosine similarity, BM25, and chose the latter two (weight split of (0.6, 0.4) respectively) based on the results (See Appendix E). 
The BM25 scores are normalized with min-max scaling before computing the ensemble score.", + "bbox": [ + 115, + 467, + 487, + 772 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2 Reranking", + "text_level": 1, + "bbox": [ + 112, + 785, + 243, + 802 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We chose the re-ranker based on evaluation over a mixture of real scientific questions from the Stack Exchange Computer Science, Math, and Statistics communities, real research queries written by the authors and their colleagues, and synthetic ones generated by fine-tuning GPT-4o-mini over questions from the ScholarQA-CS dataset (Asai et al.,", + "bbox": [ + 112, + 808, + 489, + 921 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/adc1effc9e55d3dbeb3ab620fb48b91360028f713edb853bede83ee8f1953340.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model (Size)Latency (sec/query)nDCG @ 10mRR
bge-reranker-v2-m3 (568M)0.140.9130.973
akariasai/ranker_large (568M)0.140.9060.970
jina-reranker-v2-base (278M)0.060.9070.972
mxbai-rerank-large-v1 (435M)0.460.9270.975
mxbai-rerank-base-v1 (184M)0.190.9190.974
mxbai-rerank-xsmall-v1 (70M)0.110.9110.970
mxbai-rerank-base-v2 (0.5B)0.400.9180.974
mxbai-rerank-large-v2 (1.5B)0.700.9110.975
", + "bbox": [ + 510, + 80, + 887, + 204 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1: Cross encoder re-ranker results on our dataset of GPT-4o labels. The best results are highlighted.", + "bbox": [ + 507, + 215, + 880, + 244 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2024). For a given query, passages are retrieved and then awarded a relevance score in the range 0-3 with GPT-4o. We experiment with multiple state-of-the-art re-rankers (Chen et al., 2024; Shakir et al., 2024; Asai et al., 2024), and, as shown in Table 2, mxbai-erank-large-v1 gives the best results across the board (even outperforming its v2 model on our task). To reduce latency for deployment, we implemented optimizations like Pytorch model compilation. We release the evaluation data consisting of 2,426 queries and 225,618 passages.", + "bbox": [ + 507, + 256, + 882, + 435 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3 Generation", + "text_level": 1, + "bbox": [ + 507, + 447, + 643, + 462 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We evaluate the final output of Scholar QA on the ScholarQA-CS dataset which consists of expert-annotated rubrics for 100 Computer Science research questions. The question-specific expert rubrics account for $60\\%$ of the final score, while the rest is computed based on global metrics of length, expertise and citations. We use GPT-4o (Hurst et al., 2024) as a judge with the utility provided by Asai et al. (2024) for automatic evaluation and compare against several baselines.", + "bbox": [ + 505, + 469, + 882, + 630 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Table 2, our system outperforms popular LLMs: Llama 3.1 (Dubey et al., 2024), GPT 4.1 and Claude Sonnet 3.7 (Anthropic, 2024). It even outperforms reasoning models such as Sonnet 3.7 Thinking (Anthropic, 2025), o1-mini (OpenAI, 2024b) and o3-mini (Zhang et al., 2025) overall on the Scholar QA-CS benchmark. 
This setup lacks any retrieval so the models generate the responses completely from parametric memory. The benchmark rewards attribution and supporting evidence as a measure of trust in the system, so these models score lower overall. The reasoning based models perform better than our system on the rubrics score, which suggests that they may be superior backbones for our system. However, due to the additional reasoning tokens, these models are more expensive and also significantly increase latency.", + "bbox": [ + 507, + 631, + 882, + 921 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For contemporary QA systems, we compare against OpenScholar with GPT-4o $^{4}$ , PaperQA2 (Skarlinski et al., 2024), Perplexity's Sonar Deep Research and STORM (Shao et al., 2024a). PaperQA2 did not release their retrieval corpus, so we substitute it with our retrieval pipeline for a fair comparison. Scholar QA obtains the best scores both on rubrics and overall, with the variant using Claude 3.7 Sonnet as the backbone scoring 2.4 points higher than STORM. For these QA systems, we also evaluate the attribution quality based on ALCE (Gao et al., 2023), which proposes entailment between claims and evidence to compute citation precision and recall. Again, we use GPT-4o as a judge to predict entailment (See Appendix F for the prompt) and treat each sentence in a response as a claim. Even with a report spanning multiple sections where all the sentences might not be cited, Scholar QA comes out far ahead of the other QA systems. Due to a lack of retrieval, this evaluation was not conducted when the LLMs are simply prompted to generate a response from memory. An interesting discovery from our analysis was that with an updated version of GPT-4o (i.e. gpt-4o-2024-11-20) as the judge, the scores are inflated compared to using gpt-4o-2024-08-06, even though the relative rankings are consistent (See Appendix J). For parity with Asai et al. 
(2023), we report the rubrics and citation scores with the older and newer model as the judge, respectively.", + "bbox": [ + 112, + 84, + 492, + 567 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "During our initial experiments, we restricted ScholarQA to only summarize the insights conditioned on the quotes extracted from retrieved passages. However, in cases where the retrieved passages were not relevant enough, the system failed to answer the question in favor of just discussing the information in the quotes. Moreover, for over $30\\%$ of instances in ScholarQA-CS, the rubrics require background information, even though the question might not. So, we updated our system LLM prompts to - a) Generate section text from memory if there is a lack of relevant retrieved passages and cite as LLM Memory and b) generate the first section as a background or introduction for the rest of the answer. The results reported here are obtained post these changes.", + "bbox": [ + 115, + 569, + 489, + 825 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To finalize the backbone LLM for the production web application we conducted an anonymized pair", + "bbox": [ + 112, + 827, + 489, + 859 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/9c0755d3a040f8844c31873f60f1ce98f75706719db24c2384f489c70c87a910.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelScoreModelScore
RubricsTotalRubricsTotalCite
LLM Prompt (No Retrieval)QA Systems
Llama 3.1-8B48.847.3SQA-Claude 3.7 S58.061.948.1
Llama 3.1-70B52.448.6SQA-Claude 3.5 S52.661.352.1
Claude 3.5 S50.446.6OS-GPT-4o49.353.525.9
Claude 3.7 S61.555.9PaperQA238.751.425.3
+Thinking62.755.7Perplex. Sonar DR38.752.825.2
GPT-4.163.256.2STORM54.259.540.2
o1-mini62.355.5
o3-mini60.650.2
", + "bbox": [ + 509, + 80, + 884, + 215 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2: Evaluation results on ScholarQA-CS benchmark. System responses are either generated by simply prompting LLMs with the questions or by issuing the queries to RAG based QA systems. Expert annotated rubrics only scores are reported in addition to the overall total. The overall best results are highlighted and best results within a category are underlined. SQA: Ai2 Scholar QA, OS: Open Scholar, S: Sonnet, Claude 3.5 S: claude-3-5-sonnet-20241022.", + "bbox": [ + 507, + 225, + 884, + 353 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "wise comparison among the authors of this work. We compare Claude 3.7 against 3.5. Out of 18 comparisons, Claude 3.7 Sonnet was the overwhelming favorite with 17 wins, reinforcing our hypothesis that (with no other changes) our system improves with newer and better backbone LLMs.", + "bbox": [ + 507, + 376, + 882, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4 Real-world Usage and User Feedback", + "text_level": 1, + "bbox": [ + 507, + 500, + 850, + 514 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We have publicly deployed Scholar QA for 9 weeks, and received $30.2\\mathrm{k}$ questions from 8,219 unique visitors. On average, each response is about $2.4\\mathrm{k}$ words and costs $0.50 to produce. We observed 1,075 monthly repeated users who had issued queries on two distinct days over the course of a 30 day window. We analyze the user query types and the most prominent themes were deep-dive into specific research topics (15k) and comparative analysis of specific prior work (5k) (detailed distribution in Appendix I). A total of 2,433 thumbs feedback were submitted (Figure 2 [A]) and $85\\%$ were positive. 
These suggests real-world users benefited from using Scholar QA.", + "bbox": [ + 505, + 530, + 884, + 755 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For insight into the failure modes, we manually examined the 383 instances of neutral/negative freeform feedback. Table 3 lists the feedback types we identified along with their counts as of May 2025 (example feedback in Appendix G). We hypothesize that follow-up questions may help address insufficient answer detail and cases with a lack of retrieved documents, while improved retrieval may help address incomplete or incorrect references and off-topic responses.", + "bbox": [ + 507, + 760, + 884, + 921 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "Our results are not identical to Asai et al. (2024) due to variance across LLM-as-a-judge runs. Their reported total score for OS-GPT-4o is 57.7. We re-ran the evaluation in order to obtain rubrics only scores, which they did not report.", + "bbox": [ + 112, + 870, + 489, + 921 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/1615ec25e8c63ce70bbe3adc92df8285d9e22d8f3252708cf9a5a2e898598511.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CategoryCount
Incorrect or Missing References126
Off-topic or Misunderstood Query113
Request for More Detail or Specificity289
General Feedback on Quality149
Language or Format Issues78
", + "bbox": [ + 115, + 80, + 485, + 195 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3: Feedback Categories and Counts", + "bbox": [ + 157, + 204, + 442, + 219 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5 Related Work", + "text_level": 1, + "bbox": [ + 114, + 234, + 270, + 249 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Scientific Question Answering. Answering scientific questions involves navigating scholarly sources and accurately retrieving and synthesizing them. Recently, OpenScholar (Asai et al., 2024) introduced a retrieval-augmented model designed explicitly for scientific literature synthesis with citation-supported responses with significant improvement in accuracy and reduced citation hallucination. Scholar QA extends its capabilities by leveraging the latest state-of-the-art LLMs and an open source generation pipeline that filters literature into precise quotes and produces thematically organized and detailed answers. STORM (Shao et al., 2024b) synthesizes comprehensive, Wikipedia-like articles, a distinct task from long-form scientific question answering. Other works have focused on literature review synthesis: LitLLM (Agarwal et al., 2024), which like Scholar QA uses a structured planning-and-generation pipeline similar, and SurveyForge (Yan et al., 2025), which outlines heuristics before generation. Their code was not available at the time of our evaluation. Zhou et al. (2025) present a survey categorizing AI-driven research support systems across various stages of the scientific process, including literature synthesis.", + "bbox": [ + 115, + 263, + 489, + 665 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Commercial Tools for Scientific QA. 
Commercial RAG tools have emerged to facilitate research specifically tailored for scientific literature, such as Consensus (Consensus, 2024), which synthesizes findings from research papers, Scite (Scite, 2024), which evaluates claims by analyzing citation contexts, and Elicit (Elicit, 2024), which supports structured scientific literature reviews. Other general-purpose tools also support scientific inquiries: Perplexity (Perplexity, 2024), You.com (You.com, 2024), OpenAI Deep Research (OpenAI, 2024a) and Gemini Deep Research (DeepMind, 2024). Although these platforms leverage advanced retrieval and generation capabilities to facilitate literature reviews and deliver rapid insights,", + "bbox": [ + 112, + 678, + 489, + 921 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "they can be too expensive for widespread academic use and typically lack transparency regarding their pipelines. In contrast, Scholar QA is free with open sourced code and access to search APIs that enable the research community to build upon it.", + "bbox": [ + 507, + 84, + 882, + 165 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 507, + 175, + 640, + 191 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We present Ai2 Scholar QA, a freely-available longform literature synthesis system that generates reports for complex scientific questions. 
We release key components as open source code and public APIs, and report experiments analyzing design decisions and demonstrate state-of-the-art results.", + "bbox": [ + 507, + 202, + 884, + 298 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Limitations", + "text_level": 1, + "bbox": [ + 509, + 310, + 613, + 325 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Supplementing the user feedback discussed in subsection 4.4, we would like to outline some limitations of our system and evaluation and our plans to mitigate them as part of fuvre work:", + "bbox": [ + 507, + 336, + 882, + 400 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "(i) Ai2 Scholar QA uses proprietary and closed-source LLM as the backbone for our production pipeline. As shown in Table 2, open source models lag behind the proprietary models in our evaluation. However, we are actively experimenting with open-sourced LLMs to replace the closed ones partially or completely in the pipeline. The open-sourced models will be specifically trained to do well on long-form scientific question answering and each of the sub-tasks in our multi-step generation. Further, our code is open-sourced and can easily be used with potentially any available LLM api provider supported by litellm.", + "bbox": [ + 519, + 411, + 882, + 636 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "(ii) We evaluate the answers generated by Scholar QA and compare against other systems on ScholarQA-CS dataset in subsection 4.3. Even though the answer rubrics are collected via human annotation, the evaluation is only limited to questions in the Computer Science domain and further relies completely on an LLM as the evaluator. In ongoing work, we are investigating more accurate benchmarks for evaluating long form scientific answers. 
Our approach uses real queries posed by users to Scholar QA, and human preference labels over answers from multiple systems in not just Computer Science, but Biomedicine and other scientific domains. These labels can serve as not only for evaluation, but also as training signals for models.", + "bbox": [ + 514, + 646, + 884, + 921 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 114, + 84, + 278, + 101 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We would like to thank the anonymous reviewers for helpful comments, suggestions and feedback on the manuscript. We would also like to acknowledge the Ai2 ScholarQA users for providing constructive feedback that helped us improve the system. Finally, we thank David Albright for helping with the demo video, the Ai2 communications team for their help with user outreach, and Ai2 engineers and researchers for their help with user testing before launch.", + "bbox": [ + 112, + 109, + 490, + 269 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 114, + 296, + 213, + 311 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Shubham Agarwal, Gaurav Sahu, Abhay Puri, Issam Hadj Laradji, Krishnamurthy Dj Dvijotham, Jason Stanley, Laurent Charlin, and Christopher Pal. 2024. Litllms, llms for literature review: Are we there yet?", + "Anthropic. 2024. The claude 3 model family: Opus, sonnet, haiku.", + "Anthropic. 2025. Claude 3.7 sonnet system card.", + "Akari Asai, Jacqueline He, Rulin Shao, Weijia Shi, Amanpreet Singh, Joseph Chee Chang, Kyle Lo, Luca Soldaini, Sergey Feldman, Mike D'Arcy, David Wadden, Matt Latzke, Minyang Tian, Pan Ji, Shengyan Liu, Hao Tong, Bohao Wu, Yanyu Xiong, Luke S. Zettlemoyer, and 6 others. 2024. Openscholar: Synthesizing scientific literature with retrieval-augmented lms. 
ArXiv, abs/2411.14199.", + "Akari Asai, Zeqiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. 2023. Self-rag: Learning to retrieve, generate, and critique through self-reflection. ArXiv, abs/2310.11511.", + "Jianlv Chen, Shitao Xiao, Peitian Zhang, Kun Luo, Defu Lian, and Zheng Liu. 2024. Bge m3-embedding: Multi-lingual, multi-functionality, multi-granularity text embeddings through self-knowledge distillation. Preprint, arXiv:2402.03216.", + "Consensus. 2024. Consensus - ai for research. Accessed: 2025-03-28.", + "Google DeepMind. 2024. Gemini - deep research mode. Accessed: 2025-03-28.", + "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, Anirudh Goyal, Anthony S. Hartshorn, Aobo Yang, Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, Arun Rao, Aston Zhang, and 510 others. 2024. The llama 3 herd of models. ArXiv, abs/2407.21783.", + "Elicit. 2024. Elicit - the ai research assistant. Accessed: 2025-03-28." + ], + "bbox": [ + 115, + 319, + 489, + 919 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tianyu Gao, Howard Yen, Jiatong Yu, and Danqi Chen. 2023. Enabling large language models to generate text with citations. In Conference on Empirical Methods in Natural Language Processing.", + "OpenAI Aaron Hurst, Adam Lerer, Adam P. Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, Aleksander Mkadry, Alex Baker-Whitcomb, Alex Beutel, Alex Borzunov, Alex Carney, Alex Chow, Alexander Kirillov, Alex Nichol, Alex Paino, and 397 others. 2024. Gpt-4o system card. 
ArXiv, abs/2410.21276.", + "Rodney Michael Kinney, Chloe Anastasiades, Russell Authur, Iz Beltagy, Jonathan Bragg, Alexandra Buraczynski, Isabel Cachola, Stefan Candra,oganand Chandrasekhar, Arman Cohen, Miles Crawford, Doug Downey, Jason Dunkelberger, Oren Etzioni, Rob Evans, Sergey Feldman, Joseph Gorney, David W. Graham, F.Q. Hu, and 29 others. 2023. The semantic scholar open data platform. *ArXiv*, abs/2301.10140.", + "Weize Kong, Jeffrey M. Dudek, Cheng Li, Mingyang Zhang, and Michael Bendersky. 2023. Sparseembed: Learning sparse lexical representations with contextual embeddings for retrieval. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, page 2399-2403. ACM.", + "Aditya Kusupati, Gantavya Bhatt, Aniket Rege, Matthew Wallingford, Aditya Sinha, Vivek Ramanujan, William Howard-Snyder, Kaifeng Chen, Sham Kakade, Prateek Jain, and Ali Farhadi. 2024. Matryoshka representation learning. Preprint, arXiv:2205.13147.", + "Sean Lee, Aamir Shakir, Darius Koenig, and Julius Lipp. 2024. Open source strikes bread - new fluffy embeddings model.", + "Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Kuttler, Mike Lewis, Wen tau Yih, Tim Rocktäschel, Sebastian Riedel, and Douwe Kiela. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. ArXiv, abs/2005.11401.", + "Kyle Lo, Lucy Lu Wang, Mark Neumann, Rodney Michael Kinney, and Daniel S. Weld. 2020. S2orc: The semantic scholar open research corpus. In Annual Meeting of the Association for Computational Linguistics.", + "Niklas Muennighoff, Nouamane Tazi, Loic Magne, and Nils Reimers. 2022. Mteb: Massive text embedding benchmark. In Conference of the European Chapter of the Association for Computational Linguistics.", + "Benjamin Newman, Yoonjoo Lee, Aakanksha Naik, Pao Siangliulue, Raymond Fok, Juho Kim, Daniel S. Weld, Joseph Chee Chang, and Kyle Lo. 2024. 
Arxiv digestables: Synthesizing scientific literature into tables using language models. In Conference on Empirical Methods in Natural Language Processing." + ], + "bbox": [ + 510, + 85, + 884, + 920 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jakob Nielsen. 1994. Enhancing the explanatory power of usability heuristics. In Proceedings of the SIGCHI conference on Human Factors in Computing Systems, pages 152-158.", + "OpenAI. 2024a. Chatgpt - deep research mode. Accessed: 2025-03-28.", + "OpenAI. 2024b. Openai o1 system card.", + "Perplexity. 2024. Perplexity ai - ask anything. Accessed: 2025-03-28.", + "Nils Reimers and Iryna Gurevych. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics.", + "Scite. 2024. Scite - smart citations for research. Accessed: 2025-03-28.", + "Aamir Shakir, Darius Koenig, Julius Lipp, and Sean Lee. 2024. Boost your search with the crispy mixedbread rerank models.", + "Yijia Shao, Yucheng Jiang, Theodore Kanell, Peter Xu, Omar Khattab, and Monica Lam. 2024a. Assisting in writing Wikipedia-like articles from scratch with large language models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 6252-6278, Mexico City, Mexico. Association for Computational Linguistics.", + "Yijia Shao, Yucheng Jiang, Theodore A. Kanell, Peter Xu, Omar Khattab, and Monica S. Lam. 2024b. Assisting in writing wikipedia-like articles from scratch with large language models. Preprint, arXiv:2402.14207.", + "Michael D. Skarlinski, Sam Cox, Jon M. Laurent, James D. Braza, Michaela M. Hinks, Michael J Hammerling, Manvitha Ponnapati, Samuel G. Rodriques, and Andrew D. White. 2024. 
Language agents achieve superhuman synthesis of scientific knowledge. ArXiv, abs/2409.13740.", + "Aviv Slobodkin, Eran Hirsch, Arie Cattan, Tal Schuster, and Ido Dagan. 2024. Attribute first, then generate: Locally-attributable grounded text generation. In Annual Meeting of the Association for Computational Linguistics.", + "Aivin V. Solatorio. 2024. Gistembed: Guided in-sample selection of training negatives for text embedding fine-tuning. ArXiv, abs/2402.16829.", + "Saba Sturua, Isabelle Mohr, Mohammad Kalim Akram, Michael Gunther, Bo Wang, Markus Kimmel, Feng Wang, Georgios Mastrupas, Andreas Koukounas, Andreas Koukounas, Nan Wang, and Han Xiao. 2024. jina-embeddings-v3: Multilingual embeddings with task lora. Preprint, arXiv:2409.10173." + ], + "bbox": [ + 115, + 85, + 485, + 919 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Liang Wang, Nan Yang, Xiaolong Huang, Binxing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, and Furu Wei. 2022. Text embeddings by weakly-supervised contrastive pre-training. arXiv preprint arXiv:2212.03533.", + "Xiangchao Yan, Shiyang Feng, Jiakang Yuan, Renqiu Xia, Bin Wang, Bo Zhang, and Lei Bai. 2025. Surveyforge: On the outline heuristics, memory-driven generation, and multi-dimensional evaluation for automated survey writing.", + "You.com. 2024. You.com - personalized ai search. Accessed: 2025-03-28.", + "Brian Zhang, Eric Mitchell, Hongyu Ren, Kevin Lu, Max Schwarzer, Michelle Pokrass, Shengjia Zhao, Ted Sanders, Adam Kalai, Alexandre Passos, Benjamin Sokolowsky, Elaine Ya Le, Erik Ritter, Hao Sheng, Hanson Wang, Ilya Kostrikov, James Lee, Johannes Ferstad, Michael Lampe, and 93 others. 2025. Openai o3-mini system card.", + "Zekun Zhou, Xiaocheng Feng, Lei Huang, Xiachong Feng, Ziyun Song, Ruihan Chen, Liang Zhao, Weitao Ma, Yuxuan Gu, Baoxin Wang, Dayong Wu, Guoping Hu, Ting Liu, and Bing Qin. 2025. 
From hypothesis to publication: A comprehensive survey of ai-driven research support systems." + ], + "bbox": [ + 510, + 85, + 880, + 454 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "A Python Package Usage", + "text_level": 1, + "bbox": [ + 114, + 84, + 349, + 101 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Figure 5 shows a minimal example of running the system pipeline with the ai2-scholar-qa python package and how every component can be extended or modified as the users see fit.", + "bbox": [ + 112, + 108, + 487, + 172 + ], + "page_idx": 9 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "from scholarqa rag. reranker. reranker_base import CrossEncoderScores \nfrom scholarqa rag. retrieval import PaperFinderWithReranker \nfrom scholarqa rag. retriever_base import FullTextRetriever \nfrom scholarqa import ScholarQA \nCLAUSE_SONNET_3_7 = \"anthropic/clause-3-7-sonnet-20250219\" \n#Extends the scholarqa rag. retrieval.AAbstractRetriever class \nretriever $=$ FullTextRetriever(n_retrieval=256, n_keyword_shrc=20) \n#Extends the scholarqa rag. reranker. reranker_base.AAbstractReranker class \nreranker $=$ CrossEncoderScores(\"mixedbread-ai/mxbai-erank-large-v1\") \n#Wrapper class for retrieval \npaper_find $\\equiv$ PaperFinderWithReranker(retriever, reranker, n_ rerank=50, context_threshold=0.5) \n#Scholar QA wrapper with the MultiStepQAPipeline integrated \nscholar_qa $=$ ScholarQA(paper_find, llm_model $\\coloneqq$ CLAUSEDSONNET_3_7) \nprint(scholar_qa answer_query(\"Which is the 9th planet in our solar system?\")) \n#Custom MultiStepQAPipeline class/steps \nfrom scholarqa rag. 
multi_step_qapipeline import MultiStepQAPipeline \nmqapipeline $=$ MultiStepQAPipeline(llm_model $\\coloneqq$ CLAUSEDSONNET_3_7) \npaperquotes $=$ mqapipeline step_select Quotes(query,...)#Quote Extraction \nplan $=$ mqapipeline step_clustering(query, paperquotes,...)#Outline and Clustering \n#Section Generation \nresponse $=$ list(mqapipeline generate_iterations.summary(query, paperquotes, plan,...))", + "guess_lang": "python", + "bbox": [ + 117, + 179, + 487, + 407 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "B Document Relevance Prompt", + "text_level": 1, + "bbox": [ + 114, + 460, + 403, + 476 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We used the following prompt to obtain binary relevance labels, which agreed with human annotators $80\\%$ of the time:", + "bbox": [ + 112, + 483, + 489, + 531 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "If any part of the following text is relevant to the following question, then return 1, otherwise return 0. Non-english results are not relevant, results which are primarily tables are not relevant.", + "bbox": [ + 149, + 536, + 453, + 608 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "C Retrieval Tuning Query Generation", + "text_level": 1, + "bbox": [ + 112, + 620, + 460, + 637 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Queries for the dev set were obtained from three internal sources of human research questions, and a set of LLM generations. We experimented with several methods for constructing the synthetic LLM questions. Our approach was to generate questions similar to those asked by real users by prompting the LLM to output: (1) a question based on paragraphs retrieved from the corpus, and (2) a \"more general\" version of the first question. 
We only use the \"more general\" set since they were more similar to real user queries.", + "bbox": [ + 112, + 645, + 489, + 821 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "D Embedding Models for Retrieval", + "text_level": 1, + "bbox": [ + 112, + 832, + 435, + 848 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We experimented with multiple top embedding models from the MTEB leader board to optimize retrieval for our system. These are outlined in Table 4.", + "bbox": [ + 112, + 856, + 489, + 920 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/26e2dc0222a124557de7ef89388bbab16efa7595101dd2575acfdafeb460f0ba.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
HuggingFace embedding model name
Snowflake/snowflake-arctic-embed-m5
sentence-transformers/all-mpnet-base-v2 (Reimers and Gurevych, 2019)
avsolatorio/GIST-Embedding-v0 (Solatorio, 2024)
Snowflake/snowflake-arctic-embed-m-long6
intfloat/e5-base-v2 (Wang et al., 2022)
mixedbread-ai/mxbai-embed-large-v1 (Lee et al., 2024)
jinaai/jina-embeddings-v3 (Sturua et al., 2024)
", + "bbox": [ + 521, + 80, + 870, + 209 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 4: Embedding Models to optimize retrieval", + "bbox": [ + 527, + 219, + 862, + 233 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "E Retrieval Ensemble Experiments", + "text_level": 1, + "bbox": [ + 507, + 256, + 831, + 274 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Figure 6 shows results of our ensembling experiments for the full-text retrieval index. SparseEmbed introduces an overhead with minimal performance gains, so we picked an ensemble of embedding similarity and BM25 as our final ranking metric.", + "bbox": [ + 507, + 282, + 884, + 378 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/01163041117b4702addfc2828f34282b7c07a400177ffc2748176f9968e4d482.jpg", + "image_caption": [ + "Figure 5: ai2-scholar-qa usage example", + "Figure 6: Ranking performance for various ensembles with relative size of the index required. Excluding SparseEmbed reduces the index size by $20\\%$ without a significant drop in ranking performance." + ], + "image_footnote": [], + "bbox": [ + 517, + 394, + 870, + 579 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "F Prompt for Evaluating Attribution", + "text_level": 1, + "bbox": [ + 507, + 683, + 843, + 700 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "As an Attribution Validator, your task is to verify whether a given reference can support the given claim. A claim can be either a plain sentence or a question followed by its answer. Specifically, your response should clearly indicate the relationship: Attributable, Contradictory or Extrapolatory. A contradictory error occurs when you can infer that the answer contradicts the fact presented in the context, while an extrapolatory error means that you cannot infer the correctness of the answer based on the information provided in the context. 
Output your response as a json with only a single key \"output\" and a value of one among (\"Attributable\", \"Contradictory\",", + "bbox": [ + 542, + 706, + 847, + 921 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "\"Extrapolatory\"). \nClaim: claim \nReference: ref_excerpt", + "bbox": [ + 151, + 86, + 321, + 123 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "G User Feedback Examples", + "text_level": 1, + "bbox": [ + 114, + 136, + 371, + 154 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table 5 lists some examples of the user complaints for Scholar QA reports.", + "bbox": [ + 112, + 162, + 485, + 193 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/38a8653ec599447018cb8bf10426f6f449740837923b31257f25fb40d6962518.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Feedback
The structure is good, but the articles you choose are not from top journals.
The first citation says that rabbits can obtain cholesterol from diet, not rats.
These provide a lot of general information about the topic, but nothing here actually addresses the central question I asked.
The answer did not address the ‘MOBILIZATION’ techniques at all! The answer is wrong because it addressed Exercise therapy!
They address the general setting, but not the specific question I asked.
It’s only analysing on SASAF model, but there are more.
", + "bbox": [ + 115, + 205, + 497, + 378 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "H Progress Updates and Report Sections", + "text_level": 1, + "bbox": [ + 112, + 430, + 482, + 448 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Figure 7 demonstrates how we display in real-time the progress of the system during generation. This included number of papers and passages the were processed in each step, as well as the outline as it is being generated. Each section appears as soon as it is generated, so users can begin browsing the first sections.", + "bbox": [ + 112, + 457, + 485, + 568 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/ce3b077c2ebdcf5f7466cd6d35673d3f0ab2172dcdb52e8846aace40f300e321.jpg", + "image_caption": [ + "Figure 7: Progress indication and section streaming." + ], + "image_footnote": [], + "bbox": [ + 115, + 577, + 485, + 806 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "I Query Type Analysis", + "text_level": 1, + "bbox": [ + 112, + 862, + 327, + 879 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "To analyze the types of questions users are asking, we use an LLM to categorize the queries. The most", + "bbox": [ + 112, + 889, + 487, + 921 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/daf247f1ede05b107084c0cddaa32843207f0c0fac484c5bea2ddc5880c2935f.jpg", + "image_caption": [ + "Figure 8: Distribution of different question types submitted to Scholar QA deployed Web application." + ], + "image_footnote": [], + "bbox": [ + 526, + 101, + 878, + 252 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "prominent types were comprehensive deep-dive into a specific research topic (15k) and comparative analysis of prior work (5k). 
Other themes such as factoid QA or specific methods, datasets accounted for fewer queries.", + "bbox": [ + 507, + 319, + 882, + 399 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "J Generation Results with updated GPT-40", + "text_level": 1, + "bbox": [ + 507, + 411, + 828, + 442 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table 6 shows results on ScholarQA-CS with gpt-4o-2024-11-20 as the LLM judge. These results can be contrasted with the first two columns in Table 2 which are obtained with gpt-4o-2024-08-06 as the judge. Even though the absolute scores are inflated compared to Table 2, the relative rankings are about the same with Scholar QA getting the best overall score.", + "bbox": [ + 507, + 453, + 882, + 582 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/ec626c759b741cd79197869ce8ff27fd318689aae8160e428396774589d3982f.jpg", + "table_caption": [ + "Table 5: Example Feedback on Research Issues" + ], + "table_footnote": [], + "table_body": "
ModelScoreModelScore
RubricsTotalRubricsTotal
LLM Prompting (No Retrieval)QA Systems
Llama 3.1-8B51.848.2SQA-Claude 3.7 S67.367.2
Llama 3.1-70B57.051.2SQA-Claude 3.5 S61.367.1
Claude 3.5 S57.851.3OS-GPT-4o54.959.9
Claude 3.7 S68.460.8PaperQA243.854.1
+Thinking68.358.7Perplex. Sonar DR43.956.0
GPT-4.169.361.8STORM59.264.7
o1-mini69.161.3
o3-mini68.555.9
", + "bbox": [ + 521, + 593, + 870, + 728 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table 6: Evaluation results on ScholarQA-CS benchmark with gpt-4o-2024-11-20 as the judge. System responses are either generated by simply prompting LLMs with the questions or by issuing the queries to RAG based QA systems. Expert annotated rubrics only scores are reported in addition to the overall total. The overall best results are highlighted and best results within a category are underlined. SQA: Ai2 Scholar QA, OS: Open Scholar, S: Sonnet, Claude 3.5 S: claude-3-5-sonnet-20241022.", + "bbox": [ + 507, + 737, + 884, + 879 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10861/a4e0028e-483e-4013-a80e-4d616bb12d80_model.json b/data/2025/2504_10xxx/2504.10861/a4e0028e-483e-4013-a80e-4d616bb12d80_model.json new file mode 100644 index 0000000000000000000000000000000000000000..180dabb63e0f4defa994ab5f0cfb0044e4921da0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/a4e0028e-483e-4013-a80e-4d616bb12d80_model.json @@ -0,0 +1,2004 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.134, + 0.094, + 0.864, + 0.116 + ], + "angle": 0, + "content": "\\(\\diamond\\) Ai2 Scholar QA: Organized Literature Synthesis with Attribution" + }, + { + "type": "title", + "bbox": [ + 0.12, + 0.131, + 0.864, + 0.198 + ], + "angle": 0, + "content": "Amanpreet Singh* Joseph Chee Chang* Chloe Anastasiades* Dany Haddad* Aakanksha Naik Amber Tanaka Angele Zamarron Cecile Nguyen Jena D. Hwang Jason Dunkleberger Matt Latzke Smita Rao Jaron Lochner Rob Evans Rodney Kinney Daniel S. 
Weld Doug Downey* Sergey Feldman*" + }, + { + "type": "text", + "bbox": [ + 0.413, + 0.202, + 0.586, + 0.216 + ], + "angle": 0, + "content": "Allen Institute for AI" + }, + { + "type": "text", + "bbox": [ + 0.378, + 0.22, + 0.621, + 0.234 + ], + "angle": 0, + "content": "{amanpreets, sergey}@allenai.org" + }, + { + "type": "title", + "bbox": [ + 0.261, + 0.261, + 0.341, + 0.276 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.287, + 0.461, + 0.501 + ], + "angle": 0, + "content": "Retrieval-augmented generation is increasingly effective in answering scientific questions from literature, but many state-of-the-art systems are expensive and closed-source. We introduce Ai2 Scholar QA, a free online scientific question answering application. To facilitate research, we make our entire pipeline public: as a customizable open-source Python package1 and interactive web app, along with paper indexes accessible through public APIs and downloadable datasets. We describe our system in detail and present experiments analyzing its key design decisions. In an evaluation on a recent scientific QA benchmark, we find that Ai2 Scholar QA outperforms competing systems." 
+ }, + { + "type": "image", + "bbox": [ + 0.153, + 0.504, + 0.178, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.154, + 0.52, + 0.176, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.155, + 0.537, + 0.176, + 0.549 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.155, + 0.551, + 0.176, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.509, + 0.28, + 0.521 + ], + "angle": 0, + "content": "qa.allen.ai" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.523, + 0.386, + 0.535 + ], + "angle": 0, + "content": "allenai/ai2-scholarqa-lib" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.538, + 0.273, + 0.549 + ], + "angle": 0, + "content": "Demo Video" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.552, + 0.303, + 0.565 + ], + "angle": 0, + "content": "Python Package" + }, + { + "type": "list", + "bbox": [ + 0.194, + 0.509, + 0.386, + 0.565 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.574, + 0.26, + 0.589 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.599, + 0.489, + 0.727 + ], + "angle": 0, + "content": "Long-form scientific question answering systems use retrieval-augmented generation (RAG) (Lewis et al., 2020) over scientific literature to answer complex questions. These systems produce responses that bring together relevant insights from dozens of papers to help users rapidly learn about a body of scientific work. Examples are OpenScholar (Asai et al., 2024), Elicit, Consensus, and others §5." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.728, + 0.487, + 0.856 + ], + "angle": 0, + "content": "Most of these systems are expensive to use and closed source, relying on models, workflows, and retrieval solutions not shared publicly. 
These issues create barriers for researchers who wish to study or build on the work. In response, we introduce Ai2 Scholar QA, a free-to-use scientific QA system (qa.allen.ai), and share our key components as open source software and public APIs." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.856, + 0.489, + 0.889 + ], + "angle": 0, + "content": "Scholar QA follows a multi-stage pipeline (Figure 1) that starts by querying paper indexes: one" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.262, + 0.885, + 0.502 + ], + "angle": 0, + "content": "from Semantic Scholar with over 100M abstracts, and a new index that we introduce in this work containing 11.7M full-text scientific papers. The pipeline then re-ranks the retrieved passages with a cross-encoder, and finally prompts a Large Language Model (LLM) to filter, cluster, and synthesize the passages into an answer. The final answer is presented to the user in a report with expandable sections of prose, bulleted lists, and tables. Claims in the answer are supported by citations, which can be clicked to reveal the cited paper's title and authors (with links to their corresponding Semantic Scholar pages), and in many cases relevant excerpt(s) from the paper, allowing for quick verification of the claim." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.504, + 0.885, + 0.697 + ], + "angle": 0, + "content": "The system is based on open source code, enabling the community to reproduce and build on it. We release the code for our pipeline, prompting workflow and Web application. The retrieval indexes, including the new full text search index, are available as Semantic Scholar APIs and dataset downloads, and are continually updated with new articles (Kinney et al., 2023). Together, these resources can be combined with any generative LLM API to power a complete long-form scientific QA application. Our production system currently uses Anthropic's Claude 3.7 (Anthropic, 2024)." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.697, + 0.884, + 0.921 + ], + "angle": 0, + "content": "We present analyses that justify key design decisions in our architecture in §4. Our choice of retrieval models and configuration is informed by evaluation over a collection of real and synthetic user queries and accompanying passages judged for relevance by a LLM, both of which we release publicly. We compare Scholar QA's answers against several baselines, demonstrating that it achieves state-of-the-art performance on the ScholarQA-CS benchmark (Asai et al., 2024). Finally, we discuss the reception of Scholar QA by users. The strong majority \\((85\\%)\\) of user feedback is positive, and the reported issues suggest important improvements for future work." + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.296, + 0.061, + 0.705 + ], + "angle": 270, + "content": "arXiv:2504.10861v2 [cs.CL] 28 Jul 2025" + }, + { + "type": "page_footnote", + "bbox": [ + 0.137, + 0.895, + 0.262, + 0.906 + ], + "angle": 0, + "content": "* Core contributors" + }, + { + "type": "page_footnote", + "bbox": [ + 0.137, + 0.907, + 0.37, + 0.92 + ], + "angle": 0, + "content": "1We use closed state-of-the-art LLMs." 
+ } + ], + [ + { + "type": "image", + "bbox": [ + 0.117, + 0.081, + 0.885, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.358, + 0.208, + 0.642, + 0.224 + ], + "angle": 0, + "content": "Figure 1: Scholar QA Pipeline Overview" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.231, + 0.221, + 0.248 + ], + "angle": 0, + "content": "2 Pipeline" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.251, + 0.49, + 0.381 + ], + "angle": 0, + "content": "The Scholar QA architecture (Figure 1) has three primary components: 1) retrieval to identify relevant passages from a corpus of scientific literature; 2) a neural cross-encoder that re-ranks the passages to select the most relevant top-k; and 3) multi-step LLM generation to process the passages into a comprehensive report. Next, we describe each component of the pipeline in detail." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.384, + 0.49, + 0.449 + ], + "angle": 0, + "content": "Query Validation. Prior to processing a query, we employ OpenAI's omni-moderation-latest\\(^2\\) model for safeguarding against potentially harmful content and return appropriate error messages." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.458, + 0.235, + 0.473 + ], + "angle": 0, + "content": "2.1 Retrieval" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.479, + 0.49, + 0.608 + ], + "angle": 0, + "content": "We use the Semantic Scholar API (Kinney et al., 2023) for retrieval, specifically its endpoint for keyword search over paper abstracts, and our new endpoint for querying snippets from open-access papers. A query decomposer re-formulates the user query for each endpoint and retrieves up to 256 snippets and 20 abstracts. These texts are referred to as \"passages\" below." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.611, + 0.49, + 0.773 + ], + "angle": 0, + "content": "Query Decomposer. 
The two retrieval endpoints differ in their effective query formats (one targets keyword and the other semantic queries) and filtering of results based on the user's preferences for paper metadata (paper year, venue, field of study). In our query decomposition step, an LLM is prompted to re-format the user query into paraphrases appropriate for each endpoint, and to extract the user's requested settings for the metadata filters. We use the outputs of this step for retrieval." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.775, + 0.491, + 0.889 + ], + "angle": 0, + "content": "Search APIs. The Semantic Scholar keyword search API is described in Kinney et al. (2023). We introduce a new /snippet/search endpoint, which searches over a corpus of passages extracted from S2ORC (Lo et al., 2020), loaded into a Vespa cluster with papers and passages. Papers include metadata for filtering. Passages are derived from a pa" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.231, + 0.885, + 0.296 + ], + "angle": 0, + "content": "per's title, abstract, or body and can be filtered at the paper level. The index includes 11.7M full-text papers across the fields of study listed here, and a total of 285.6M passages." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.297, + 0.885, + 0.441 + ], + "angle": 0, + "content": "Each passage is limited to 480 tokens and truncated at sentence and section boundaries where possible, having an overlap of one sentence (up to 64 tokens) with the preceding and following passages. Passage text is embedded with mxbai-embed-large-v1 (Lee et al., 2024) with binary quantization, and placed into a dense (approximate nearest neighbor) index, as well as a traditional sparse keyword index." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.443, + 0.884, + 0.507 + ], + "angle": 0, + "content": "We first retrieve a union of embedding and keyword-based matches, applying any specified filters. 
The filtered results are ranked with a weighted sum of embedding similarity and bm25 scores." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.521, + 0.642, + 0.537 + ], + "angle": 0, + "content": "2.2 Reranking" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.543, + 0.884, + 0.689 + ], + "angle": 0, + "content": "The passages obtained from the retrieval step are subsequently passed to a neural re-ranker and the top 50 results are retained. The re-ranker is a cross-encoder that encodes both the query and a candidate document simultaneously and outputs a relevance score used to rank the documents. We selected mxbai-erank-large-v1 (Shakir et al., 2024) based on the results in §4.2 and host it on Modal with a single NVIDIA L40S GPU." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.702, + 0.733, + 0.718 + ], + "angle": 0, + "content": "2.3 Multi-step Generation" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.725, + 0.885, + 0.853 + ], + "angle": 0, + "content": "The generation phase employs a three-step approach: first, the retrieved passages are processed to extract more precise quotes relevant to the query; second, the quotes are thematically clustered into separate sections appropriate for the answer; finally, a controlled generation process composes the final report one section at a time, synthesizing the quotes assigned to that section." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.858, + 0.884, + 0.922 + ], + "angle": 0, + "content": "Quote extraction. Passages from the retrieval stage can be lengthy and may contain extraneous information not useful for answering the user query (Asai et al., 2023). 
The quote extraction stage aims" + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.894, + 0.449, + 0.92 + ], + "angle": 0, + "content": "2https://platform.openai.com/docs/guides/moderation" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.114, + 0.085, + 0.486, + 0.117 + ], + "angle": 0, + "content": "to select only the most relevant quotes from the passages to improve the precision of the answer." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.119, + 0.49, + 0.298 + ], + "angle": 0, + "content": "We instruct an LLM to extract verbatim quotes that directly contribute to answering the query (Slobodkin et al., 2024). As input to the extraction, we gather all passages from the re-ranker for a given paper, and concatenate these to the abstract of the paper. This aggregation helps create a richer context conducive to extracting relevant quotes. The LLM processes each paper's content independently and returns the selected quotes separated by ellipses. If the entire paper context is deemed irrelevant, it is discarded from further processing." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.302, + 0.49, + 0.448 + ], + "angle": 0, + "content": "Answer Outline and Clustering. For generating a comprehensive research report, the effective organization of reference materials is essential for its overall coherence. We propose a thematic outline framework where the answer is divided into sections representing topics, and the reference quotes are assigned to these topics. This mapping allows the system to selectively focus only on the pertinent subset of quotes when synthesizing a section." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.45, + 0.49, + 0.707 + ], + "angle": 0, + "content": "First, the LLM is instructed to generate a list of themes in logical order and the appropriate synthesis format for each theme, independent of the quotes from the previous step. 
The first section is always an introduction or background to provide the user the basics for understanding the answer. The format of each section can be either a paragraph or a bulleted list, serving different information needs. Paragraphs convey nuanced summaries from multiple papers, while bulleted lists enumerate related papers (e.g., models, datasets, or interactive systems). These list are also the catalyst for generating the comparison tables (see §2.3). Following this, the sections are assigned 0 or more quotes. In case no quote is assigned to a section, it is generated completely from the LLM weights." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.713, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Report Generation. With the answer outline in place, each section of the report is synthesized serially conditioned on the query, reference sources, and the sections prior to it. The LLM is also instructed to generate a TLDR for each section. The references are either the quotes assigned to the section or abstracts of papers that are cited within these quotes. This citation following method allows the LLM to condition on and cite foundational sources which are not uncovered in retrieval. The LLM is instructed to cite the sources for each claim in the generated section text and cite generations from its parameters as LLM Memory." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.392 + ], + "angle": 0, + "content": "Paper Comparison Table Generation. Since bulleted list sections typically include closely related papers (e.g., different datasets), we additionally generate tables that compare and contrast all papers cited in that section using common aspects (e.g., size and annotation method). This pipeline is detailed in Newman et al. (2024). At a high level, the inputs are the query to Scholar QA, the section title, and the abstracts of all papers cited in the section. 
An LLM first produces a set of common aspects (columns) to compare papers (rows). Each cell (paper-aspect pair) is filled with a value using the full-text of the paper. Finally, as not all aspects are applicable to every paper (e.g., one paper might not be about a dataset), we filter out columns and rows with a high proportion of missing values. Figure 3 [A] shows an expanded table in Scholar QA where related papers from a section are compared across a set of common aspects ([B])." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.406, + 0.883, + 0.422 + ], + "angle": 0, + "content": "3 Scholar QA: Interface and Source Code" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.433, + 0.885, + 0.579 + ], + "angle": 0, + "content": "Scholar QA is open-sourced as an extensible Python package (ai2-scholar-qa) and a Typescript and React-based interactive web application. The LLM functionality of Scholar QA is implemented with litellm, which supports swapping a variety of models using your own keys. Thus, the community can build upon Scholar QA and easily visualize the results (examples in Appendix A). Below we describe the user experience of the demo.3" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.594, + 0.886, + 0.788 + ], + "angle": 0, + "content": "Progress and Section Streaming. High system latency can hinder usability. On average, Scholar QA produces a full report in 2.5 minutes (N=500, \\(\\sigma = 70\\mathrm{s}\\)), which is comparable to modern LLM-based research tools. To further improve usability, the following designs were used: 1) Displaying detailed real-time progress of the system (Nielsen, 1994) so users can examine the number of papers, passages, and sections being processed. 2) Presenting each section as soon as it is generated, so users can begin browsing the first section in 50 seconds (N=500, \\(\\sigma = 24\\mathrm{s}\\)) post issuing a query (Appendix H)." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.799, + 0.885, + 0.895 + ], + "angle": 0, + "content": "Expandable Sections. By default, sections are collapsed showing only their titles, TLDR summaries, and number of cited sources. This gives users a gist of the information included in the report (Figure 2 [A]). Users can then click on the title of a section they wish to read to expand it ([B])." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.895, + 0.883, + 0.922 + ], + "angle": 0, + "content": "3Our production system has a few additional features like downloadable reports, login and links to other Ai2 systems." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.153, + 0.082, + 0.843, + 0.518 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.526, + 0.884, + 0.556 + ], + "angle": 0, + "content": "Figure 2: Multi-section [B] report generated by Scholar QA. References are linked to supporting excerpts [C]. Thumbs and free text feedback are collected for the full report [A], and also for each section and inline table." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.581, + 0.489, + 0.693 + ], + "angle": 0, + "content": "References and Evidence Excerpts. To verify the claims in the report, users can click on the inline citations (Figure 2 [C]) or the pink excerpt icon in the inline table cells (Figure 3 [C]) to bring up a popup paper card. From the paper card, they can see the relevant excerpts used during the generation or click on the title to open the paper directly." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.697, + 0.49, + 0.745 + ], + "angle": 0, + "content": "User Feedback Collection. We collect thumbs up/down or textual feedback for the whole report (Figure 2 [A]) and at each section and inline table." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.762, + 0.244, + 0.777 + ], + "angle": 0, + "content": "4 Evaluation" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.788, + 0.234, + 0.802 + ], + "angle": 0, + "content": "4.1 Retrieval" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.81, + 0.49, + 0.922 + ], + "angle": 0, + "content": "We tuned our retrieval setup by optimizing ranking over a dev set of 500 synthetic queries (see Appendix C) and the top 1000 passages for each based on GIST embedding distance (Solatorio, 2024). We generated binary relevance labels with gpt-4-turbo (see Appendix B for the prompt), which were found to have \\(80\\%\\) agreement with" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.607, + 0.885, + 0.83 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.854, + 0.885, + 0.898 + ], + "angle": 0, + "content": "Figure 3: Inline tables compare papers [A] with common aspects [B] with values linked to supporting excerpts from the papers [C]." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.119, + 0.085, + 0.481, + 0.235 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.25, + 0.49, + 0.336 + ], + "angle": 0, + "content": "Figure 4: Embedding ranking performance for various compression methods and matryoshka cutoffs. The \\( x \\)-axis indicates the size of the vector index based relative to using int8 quantization and the full embedding size. The red circle indicates the selected configuration. Embedding size is notated next to each point." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.35, + 0.457, + 0.366 + ], + "angle": 0, + "content": "human annotators on a sample of 100 queries." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.371, + 0.488, + 0.466 + ], + "angle": 0, + "content": "Pipeline Tuning. 
We optimized several aspects of retrieval over this dev set: embedding model selection and quantization method for it, the components and weights in the final ensemble, and (when relevant) the target Matryoshka dimension for the embeddings (Kusupati et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.468, + 0.489, + 0.773 + ], + "angle": 0, + "content": "We experimented with medium sized embedding models based on top performers on the retriever and ranking tasks of the MTEB (Muennighoff et al., 2022) leaderboard on HuggingFace. Table 4 in Appendix D lists our candidate models. The mxbai-embed-large-v1 (Lee et al., 2024) embeddings performed best over our dev set. Figure 4 validates our choice of quantization method and target Matryoshka dimension for these embeddings. We chose unary quantization with no Matryoshka truncation, (indicated by a red circle on the plot) since it satisfied our storage constraints without a large drop in performance. We experimented with assembling SparseEmbed (Kong et al., 2023), embedding cosine similarity, BM25, and chose the latter two (weight split of (0.6, 0.4) respectively) based on the results (See Appendix E). The BM25 scores are normalized with min-max scaling before computing the ensemble score." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.787, + 0.245, + 0.803 + ], + "angle": 0, + "content": "4.2 Reranking" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.809, + 0.49, + 0.922 + ], + "angle": 0, + "content": "We chose the re-ranker based on evaluation over a mixture of real scientific questions from the Stack Exchange Computer Science, Math, and Statistics communities, real research queries written by the authors and their colleagues, and synthetic ones generated by fine-tuning GPT-4o-mini over questions from the ScholarQA-CS dataset (Asai et al.," + }, + { + "type": "table", + "bbox": [ + 0.511, + 0.082, + 0.888, + 0.205 + ], + "angle": 0, + "content": "
Model (Size)Latency (sec/query)nDCG @ 10mRR
bge-reranker-v2-m3 (568M)0.140.9130.973
akariasai/ranker_large (568M)0.140.9060.970
jina-reranker-v2-base (278M)0.060.9070.972
mxbai-rerank-large-v1 (435M)0.460.9270.975
mxbai-rerank-base-v1 (184M)0.190.9190.974
mxbai-rerank-xsmall-v1 (70M)0.110.9110.970
mxbai-rerank-base-v2 (0.5B)0.400.9180.974
mxbai-rerank-large-v2 (1.5B)0.700.9110.975
" + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.216, + 0.882, + 0.245 + ], + "angle": 0, + "content": "Table 1: Cross encoder re-ranker results on our dataset of GPT-4o labels. The best results are highlighted." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.258, + 0.884, + 0.436 + ], + "angle": 0, + "content": "2024). For a given query, passages are retrieved and then awarded a relevance score in the range 0-3 with GPT-4o. We experiment with multiple state-of-the-art re-rankers (Chen et al., 2024; Shakir et al., 2024; Asai et al., 2024), and, as shown in Table 2, mxbai-erank-large-v1 gives the best results across the board (even outperforming its v2 model on our task). To reduce latency for deployment, we implemented optimizations like Pytorch model compilation. We release the evaluation data consisting of 2,426 queries and 225,618 passages." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.448, + 0.645, + 0.463 + ], + "angle": 0, + "content": "4.3 Generation" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.47, + 0.884, + 0.631 + ], + "angle": 0, + "content": "We evaluate the final output of Scholar QA on the ScholarQA-CS dataset which consists of expert-annotated rubrics for 100 Computer Science research questions. The question-specific expert rubrics account for \\(60\\%\\) of the final score, while the rest is computed based on global metrics of length, expertise and citations. We use GPT-4o (Hurst et al., 2024) as a judge with the utility provided by Asai et al. (2024) for automatic evaluation and compare against several baselines." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.632, + 0.884, + 0.922 + ], + "angle": 0, + "content": "As shown in Table 2, our system outperforms popular LLMs: Llama 3.1 (Dubey et al., 2024), GPT 4.1 and Claude Sonnet 3.7 (Anthropic, 2024). 
It even outperforms reasoning models such as Sonnet 3.7 Thinking (Anthropic, 2025), o1-mini (OpenAI, 2024b) and o3-mini (Zhang et al., 2025) overall on the Scholar QA-CS benchmark. This setup lacks any retrieval so the models generate the responses completely from parametric memory. The benchmark rewards attribution and supporting evidence as a measure of trust in the system, so these models score lower overall. The reasoning based models perform better than our system on the rubrics score, which suggests that they may be superior backbones for our system. However, due to the additional reasoning tokens, these models are more expensive and also significantly increase latency." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.493, + 0.568 + ], + "angle": 0, + "content": "For contemporary QA systems, we compare against OpenScholar with GPT-4o\\(^{4}\\), PaperQA2 (Skarlinski et al., 2024), Perplexity's Sonar Deep Research and STORM (Shao et al., 2024a). PaperQA2 did not release their retrieval corpus, so we substitute it with our retrieval pipeline for a fair comparison. Scholar QA obtains the best scores both on rubrics and overall, with the variant using Claude 3.7 Sonnet as the backbone scoring 2.4 points higher than STORM. For these QA systems, we also evaluate the attribution quality based on ALCE (Gao et al., 2023), which proposes entailment between claims and evidence to compute citation precision and recall. Again, we use GPT-4o as a judge to predict entailment (See Appendix F for the prompt) and treat each sentence in a response as a claim. Even with a report spanning multiple sections where all the sentences might not be cited, Scholar QA comes out far ahead of the other QA systems. Due to a lack of retrieval, this evaluation was not conducted when the LLMs are simply prompted to generate a response from memory. An interesting discovery from our analysis was that with an updated version of GPT-4o (i.e. 
gpt-4o-2024-11-20) as the judge, the scores are inflated compared to using gpt-4o-2024-08-06, even though the relative rankings are consistent (See Appendix J). For parity with Asai et al. (2023), we report the rubrics and citation scores with the older and newer model as the judge, respectively." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.57, + 0.49, + 0.826 + ], + "angle": 0, + "content": "During our initial experiments, we restricted ScholarQA to only summarize the insights conditioned on the quotes extracted from retrieved passages. However, in cases where the retrieved passages were not relevant enough, the system failed to answer the question in favor of just discussing the information in the quotes. Moreover, for over \\(30\\%\\) of instances in ScholarQA-CS, the rubrics require background information, even though the question might not. So, we updated our system LLM prompts to - a) Generate section text from memory if there is a lack of relevant retrieved passages and cite as LLM Memory and b) generate the first section as a background or introduction for the rest of the answer. The results reported here are obtained post these changes." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.828, + 0.49, + 0.86 + ], + "angle": 0, + "content": "To finalize the backbone LLM for the production web application we conducted an anonymized pair" + }, + { + "type": "table", + "bbox": [ + 0.51, + 0.082, + 0.885, + 0.216 + ], + "angle": 0, + "content": "
ModelScoreModelScore
RubricsTotalRubricsTotalCite
LLM Prompt (No Retrieval)QA Systems
Llama 3.1-8B48.847.3SQA-Claude 3.7 S58.061.948.1
Llama 3.1-70B52.448.6SQA-Claude 3.5 S52.661.352.1
Claude 3.5 S50.446.6OS-GPT-4o49.353.525.9
Claude 3.7 S61.555.9PaperQA238.751.425.3
+Thinking62.755.7Perplex. Sonar DR38.752.825.2
GPT-4.163.256.2STORM54.259.540.2
o1-mini62.355.5
o3-mini60.650.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.226, + 0.885, + 0.354 + ], + "angle": 0, + "content": "Table 2: Evaluation results on ScholarQA-CS benchmark. System responses are either generated by simply prompting LLMs with the questions or by issuing the queries to RAG based QA systems. Expert annotated rubrics only scores are reported in addition to the overall total. The overall best results are highlighted and best results within a category are underlined. SQA: Ai2 Scholar QA, OS: Open Scholar, S: Sonnet, Claude 3.5 S: claude-3-5-sonnet-20241022." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.377, + 0.884, + 0.472 + ], + "angle": 0, + "content": "wise comparison among the authors of this work. We compare Claude 3.7 against 3.5. Out of 18 comparisons, Claude 3.7 Sonnet was the overwhelming favorite with 17 wins, reinforcing our hypothesis that (with no other changes) our system improves with newer and better backbone LLMs." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.501, + 0.851, + 0.516 + ], + "angle": 0, + "content": "4.4 Real-world Usage and User Feedback" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.531, + 0.885, + 0.756 + ], + "angle": 0, + "content": "We have publicly deployed Scholar QA for 9 weeks, and received \\(30.2\\mathrm{k}\\) questions from 8,219 unique visitors. On average, each response is about \\(2.4\\mathrm{k}\\) words and costs $0.50 to produce. We observed 1,075 monthly repeated users who had issued queries on two distinct days over the course of a 30 day window. We analyze the user query types and the most prominent themes were deep-dive into specific research topics (15k) and comparative analysis of specific prior work (5k) (detailed distribution in Appendix I). A total of 2,433 thumbs feedback were submitted (Figure 2 [A]) and \\(85\\%\\) were positive. These suggests real-world users benefited from using Scholar QA." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.761, + 0.885, + 0.922 + ], + "angle": 0, + "content": "For insight into the failure modes, we manually examined the 383 instances of neutral/negative freeform feedback. Table 3 lists the feedback types we identified along with their counts as of May 2025 (example feedback in Appendix G). We hypothesize that follow-up questions may help address insufficient answer detail and cases with a lack of retrieved documents, while improved retrieval may help address incomplete or incorrect references and off-topic responses." + }, + { + "type": "page_footnote", + "bbox": [ + 0.113, + 0.871, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Our results are not identical to Asai et al. (2024) due to variance across LLM-as-a-judge runs. Their reported total score for OS-GPT-4o is 57.7. We re-ran the evaluation in order to obtain rubrics only scores, which they did not report." + } + ], + [ + { + "type": "table", + "bbox": [ + 0.117, + 0.082, + 0.487, + 0.196 + ], + "angle": 0, + "content": "
CategoryCount
Incorrect or Missing References126
Off-topic or Misunderstood Query113
Request for More Detail or Specificity289
General Feedback on Quality149
Language or Format Issues78
" + }, + { + "type": "table_caption", + "bbox": [ + 0.158, + 0.205, + 0.443, + 0.22 + ], + "angle": 0, + "content": "Table 3: Feedback Categories and Counts" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.235, + 0.271, + 0.25 + ], + "angle": 0, + "content": "5 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.265, + 0.49, + 0.666 + ], + "angle": 0, + "content": "Scientific Question Answering. Answering scientific questions involves navigating scholarly sources and accurately retrieving and synthesizing them. Recently, OpenScholar (Asai et al., 2024) introduced a retrieval-augmented model designed explicitly for scientific literature synthesis with citation-supported responses with significant improvement in accuracy and reduced citation hallucination. Scholar QA extends its capabilities by leveraging the latest state-of-the-art LLMs and an open source generation pipeline that filters literature into precise quotes and produces thematically organized and detailed answers. STORM (Shao et al., 2024b) synthesizes comprehensive, Wikipedia-like articles, a distinct task from long-form scientific question answering. Other works have focused on literature review synthesis: LitLLM (Agarwal et al., 2024), which like Scholar QA uses a structured planning-and-generation pipeline similar, and SurveyForge (Yan et al., 2025), which outlines heuristics before generation. Their code was not available at the time of our evaluation. Zhou et al. (2025) present a survey categorizing AI-driven research support systems across various stages of the scientific process, including literature synthesis." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.68, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Commercial Tools for Scientific QA. 
Commercial RAG tools have emerged to facilitate research specifically tailored for scientific literature, such as Consensus (Consensus, 2024), which synthesizes findings from research papers, Scite (Scite, 2024), which evaluates claims by analyzing citation contexts, and Elicit (Elicit, 2024), which supports structured scientific literature reviews. Other general-purpose tools also support scientific inquiries: Perplexity (Perplexity, 2024), You.com (You.com, 2024), OpenAI Deep Research (OpenAI, 2024a) and Gemini Deep Research (DeepMind, 2024). Although these platforms leverage advanced retrieval and generation capabilities to facilitate literature reviews and deliver rapid insights," + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.883, + 0.166 + ], + "angle": 0, + "content": "they can be too expensive for widespread academic use and typically lack transparency regarding their pipelines. In contrast, Scholar QA is free with open sourced code and access to search APIs that enable the research community to build upon it." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.177, + 0.642, + 0.192 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.203, + 0.885, + 0.299 + ], + "angle": 0, + "content": "We present Ai2 Scholar QA, a freely-available longform literature synthesis system that generates reports for complex scientific questions. We release key components as open source code and public APIs, and report experiments analyzing design decisions and demonstrate state-of-the-art results." 
+ }, + { + "type": "title", + "bbox": [ + 0.51, + 0.311, + 0.615, + 0.326 + ], + "angle": 0, + "content": "Limitations" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.337, + 0.884, + 0.401 + ], + "angle": 0, + "content": "Supplementing the user feedback discussed in subsection 4.4, we would like to outline some limitations of our system and evaluation and our plans to mitigate them as part of future work:" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.412, + 0.884, + 0.637 + ], + "angle": 0, + "content": "(i) Ai2 Scholar QA uses proprietary and closed-source LLM as the backbone for our production pipeline. As shown in Table 2, open source models lag behind the proprietary models in our evaluation. However, we are actively experimenting with open-sourced LLMs to replace the closed ones partially or completely in the pipeline. The open-sourced models will be specifically trained to do well on long-form scientific question answering and each of the sub-tasks in our multi-step generation. Further, our code is open-sourced and can easily be used with potentially any available LLM api provider supported by litellm." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.648, + 0.885, + 0.922 + ], + "angle": 0, + "content": "(ii) We evaluate the answers generated by Scholar QA and compare against other systems on ScholarQA-CS dataset in subsection 4.3. Even though the answer rubrics are collected via human annotation, the evaluation is only limited to questions in the Computer Science domain and further relies completely on an LLM as the evaluator. In ongoing work, we are investigating more accurate benchmarks for evaluating long form scientific answers. Our approach uses real queries posed by users to Scholar QA, and human preference labels over answers from multiple systems in not just Computer Science, but Biomedicine and other scientific domains. These labels can serve as not only for evaluation, but also as training signals for models."
+ } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.085, + 0.279, + 0.102 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.11, + 0.491, + 0.271 + ], + "angle": 0, + "content": "We would like to thank the anonymous reviewers for helpful comments, suggestions and feedback on the manuscript. We would also like to acknowledge the Ai2 ScholarQA users for providing constructive feedback that helped us improve the system. Finally, we thank David Albright for helping with the demo video, the Ai2 communications team for their help with user outreach, and Ai2 engineers and researchers for their help with user testing before launch." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.297, + 0.214, + 0.312 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.32, + 0.49, + 0.386 + ], + "angle": 0, + "content": "Shubham Agarwal, Gaurav Sahu, Abhay Puri, Issam Hadj Laradji, Krishnamurthy Dj Dvijotham, Jason Stanley, Laurent Charlin, and Christopher Pal. 2024. Litllms, llms for literature review: Are we there yet?" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.395, + 0.489, + 0.422 + ], + "angle": 0, + "content": "Anthropic. 2024. The claude 3 model family: Opus, sonnet, haiku." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.431, + 0.449, + 0.446 + ], + "angle": 0, + "content": "Anthropic. 2025. Claude 3.7 sonnet system card." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.455, + 0.49, + 0.56 + ], + "angle": 0, + "content": "Akari Asai, Jacqueline He, Rulin Shao, Weijia Shi, Amanpreet Singh, Joseph Chee Chang, Kyle Lo, Luca Soldaini, Sergey Feldman, Mike D'Arcy, David Wadden, Matt Latzke, Minyang Tian, Pan Ji, Shengyan Liu, Hao Tong, Bohao Wu, Yanyu Xiong, Luke S. Zettlemoyer, and 6 others. 2024. Openscholar: Synthesizing scientific literature with retrieval-augmented lms. ArXiv, abs/2411.14199." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.569, + 0.489, + 0.622 + ], + "angle": 0, + "content": "Akari Asai, Zeqiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. 2023. Self-rag: Learning to retrieve, generate, and critique through self-reflection. ArXiv, abs/2310.11511." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.632, + 0.489, + 0.698 + ], + "angle": 0, + "content": "Jianlv Chen, Shitao Xiao, Peitian Zhang, Kun Luo, Defu Lian, and Zheng Liu. 2024. Bge m3-embedding: Multi-lingual, multi-functionality, multi-granularity text embeddings through self-knowledge distillation. Preprint, arXiv:2402.03216." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.707, + 0.489, + 0.733 + ], + "angle": 0, + "content": "Consensus. 2024. Consensus - ai for research. Accessed: 2025-03-28." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.743, + 0.489, + 0.77 + ], + "angle": 0, + "content": "Google DeepMind. 2024. Gemini - deep research mode. Accessed: 2025-03-28." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.779, + 0.489, + 0.884 + ], + "angle": 0, + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, Anirudh Goyal, Anthony S. Hartshorn, Aobo Yang, Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, Arun Rao, Aston Zhang, and 510 others. 2024. The llama 3 herd of models. ArXiv, abs/2407.21783." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.894, + 0.489, + 0.92 + ], + "angle": 0, + "content": "Elicit. 2024. Elicit - the ai research assistant. Accessed: 2025-03-28." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.32, + 0.49, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.086, + 0.883, + 0.14 + ], + "angle": 0, + "content": "Tianyu Gao, Howard Yen, Jiatong Yu, and Danqi Chen. 2023. Enabling large language models to generate text with citations. 
In Conference on Empirical Methods in Natural Language Processing." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.147, + 0.885, + 0.251 + ], + "angle": 0, + "content": "OpenAI Aaron Hurst, Adam Lerer, Adam P. Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, Aleksander Madry, Alex Baker-Whitcomb, Alex Beutel, Alex Borzunov, Alex Carney, Alex Chow, Alexander Kirillov, Alex Nichol, Alex Paino, and 397 others. 2024. Gpt-4o system card. ArXiv, abs/2410.21276." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.26, + 0.885, + 0.377 + ], + "angle": 0, + "content": "Rodney Michael Kinney, Chloe Anastasiades, Russell Authur, Iz Beltagy, Jonathan Bragg, Alexandra Buraczynski, Isabel Cachola, Stefan Candra, Yoganand Chandrasekhar, Arman Cohan, Miles Crawford, Doug Downey, Jason Dunkelberger, Oren Etzioni, Rob Evans, Sergey Feldman, Joseph Gorney, David W. Graham, F.Q. Hu, and 29 others. 2023. The semantic scholar open data platform. ArXiv, abs/2301.10140." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.386, + 0.885, + 0.478 + ], + "angle": 0, + "content": "Weize Kong, Jeffrey M. Dudek, Cheng Li, Mingyang Zhang, and Michael Bendersky. 2023. Sparseembed: Learning sparse lexical representations with contextual embeddings for retrieval. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, page 2399-2403. ACM." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.486, + 0.885, + 0.564 + ], + "angle": 0, + "content": "Aditya Kusupati, Gantavya Bhatt, Aniket Rege, Matthew Wallingford, Aditya Sinha, Vivek Ramanujan, William Howard-Snyder, Kaifeng Chen, Sham Kakade, Prateek Jain, and Ali Farhadi. 2024. Matryoshka representation learning. Preprint, arXiv:2205.13147."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.573, + 0.883, + 0.613 + ], + "angle": 0, + "content": "Sean Lee, Aamir Shakir, Darius Koenig, and Julius Lipp. 2024. Open source strikes bread - new fluffy embeddings model." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.621, + 0.885, + 0.699 + ], + "angle": 0, + "content": "Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Kuttler, Mike Lewis, Wen tau Yih, Tim Rocktäschel, Sebastian Riedel, and Douwe Kiela. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. ArXiv, abs/2005.11401." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.707, + 0.885, + 0.773 + ], + "angle": 0, + "content": "Kyle Lo, Lucy Lu Wang, Mark Neumann, Rodney Michael Kinney, and Daniel S. Weld. 2020. S2orc: The semantic scholar open research corpus. In Annual Meeting of the Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.781, + 0.883, + 0.835 + ], + "angle": 0, + "content": "Niklas Muennighoff, Nouamane Tazi, Loic Magne, and Nils Reimers. 2022. Mteb: Massive text embedding benchmark. In Conference of the European Chapter of the Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.842, + 0.885, + 0.921 + ], + "angle": 0, + "content": "Benjamin Newman, Yoonjoo Lee, Aakanksha Naik, Pao Siangliulue, Raymond Fok, Juho Kim, Daniel S. Weld, Joseph Chee Chang, and Kyle Lo. 2024. Arxiv digestables: Synthesizing scientific literature into tables using language models. In Conference on Empirical Methods in Natural Language Processing." + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.086, + 0.885, + 0.921 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.139 + ], + "angle": 0, + "content": "Jakob Nielsen. 1994. Enhancing the explanatory power of usability heuristics. 
In Proceedings of the SIGCHI conference on Human Factors in Computing Systems, pages 152-158." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.151, + 0.487, + 0.175 + ], + "angle": 0, + "content": "OpenAI. 2024a. Chatgpt - deep research mode. Accessed: 2025-03-28." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.189, + 0.389, + 0.202 + ], + "angle": 0, + "content": "OpenAI. 2024b. Openai o1 system card." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.214, + 0.487, + 0.239 + ], + "angle": 0, + "content": "Perplexity. 2024. Perplexity ai - ask anything. Accessed: 2025-03-28." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.252, + 0.487, + 0.316 + ], + "angle": 0, + "content": "Nils Reimers and Iryna Gurevych. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.329, + 0.487, + 0.353 + ], + "angle": 0, + "content": "Scite. 2024. Scite - smart citations for research. Accessed: 2025-03-28." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.367, + 0.487, + 0.405 + ], + "angle": 0, + "content": "Aamir Shakir, Darius Koenig, Julius Lipp, and Sean Lee. 2024. Boost your search with the crispy mixedbread rerank models." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.417, + 0.487, + 0.535 + ], + "angle": 0, + "content": "Yijia Shao, Yucheng Jiang, Theodore Kanell, Peter Xu, Omar Khattab, and Monica Lam. 2024a. Assisting in writing Wikipedia-like articles from scratch with large language models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 6252-6278, Mexico City, Mexico. Association for Computational Linguistics." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.547, + 0.487, + 0.611 + ], + "angle": 0, + "content": "Yijia Shao, Yucheng Jiang, Theodore A. Kanell, Peter Xu, Omar Khattab, and Monica S. Lam. 2024b. Assisting in writing wikipedia-like articles from scratch with large language models. Preprint, arXiv:2402.14207." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.624, + 0.487, + 0.702 + ], + "angle": 0, + "content": "Michael D. Skarlinski, Sam Cox, Jon M. Laurent, James D. Braza, Michaela M. Hinks, Michael J Hammerling, Manvitha Ponnapati, Samuel G. Rodriques, and Andrew D. White. 2024. Language agents achieve superhuman synthesis of scientific knowledge. ArXiv, abs/2409.13740." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.714, + 0.487, + 0.779 + ], + "angle": 0, + "content": "Aviv Slobodkin, Eran Hirsch, Arie Cattan, Tal Schuster, and Ido Dagan. 2024. Attribute first, then generate: Locally-attributable grounded text generation. In Annual Meeting of the Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.791, + 0.486, + 0.83 + ], + "angle": 0, + "content": "Aivin V. Solatorio. 2024. Gistembed: Guided in-sample selection of training negatives for text embedding fine-tuning. ArXiv, abs/2402.16829." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.842, + 0.487, + 0.92 + ], + "angle": 0, + "content": "Saba Sturua, Isabelle Mohr, Mohammad Kalim Akram, Michael Gunther, Bo Wang, Markus Kimmel, Feng Wang, Georgios Mastrupas, Andreas Koukounas, Andreas Koukounas, Nan Wang, and Han Xiao. 2024. jina-embeddings-v3: Multilingual embeddings with task lora. Preprint, arXiv:2409.10173." 
+ }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.086, + 0.882, + 0.15 + ], + "angle": 0, + "content": "Liang Wang, Nan Yang, Xiaolong Huang, Binxing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, and Furu Wei. 2022. Text embeddings by weakly-supervised contrastive pre-training. arXiv preprint arXiv:2212.03533." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.162, + 0.882, + 0.227 + ], + "angle": 0, + "content": "Xiangchao Yan, Shiyang Feng, Jiakang Yuan, Renqiu Xia, Bin Wang, Bo Zhang, and Lei Bai. 2025. Surveyforge: On the outline heuristics, memory-driven generation, and multi-dimensional evaluation for automated survey writing." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.238, + 0.882, + 0.263 + ], + "angle": 0, + "content": "You.com. 2024. You.com - personalized ai search. Accessed: 2025-03-28." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.274, + 0.882, + 0.366 + ], + "angle": 0, + "content": "Brian Zhang, Eric Mitchell, Hongyu Ren, Kevin Lu, Max Schwarzer, Michelle Pokrass, Shengjia Zhao, Ted Sanders, Adam Kalai, Alexandre Passos, Benjamin Sokolowsky, Elaine Ya Le, Erik Ritter, Hao Sheng, Hanson Wang, Ilya Kostrikov, James Lee, Johannes Ferstad, Michael Lampe, and 93 others. 2025. Openai o3-mini system card." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.376, + 0.882, + 0.455 + ], + "angle": 0, + "content": "Zekun Zhou, Xiaocheng Feng, Lei Huang, Xiachong Feng, Ziyun Song, Ruihan Chen, Liang Zhao, Weitao Ma, Yuxuan Gu, Baoxin Wang, Dayong Wu, Guoping Hu, Ting Liu, and Bing Qin. 2025. From hypothesis to publication: A comprehensive survey of ai-driven research support systems." 
+ }, + { + "type": "list", + "bbox": [ + 0.511, + 0.086, + 0.882, + 0.455 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.085, + 0.35, + 0.102 + ], + "angle": 0, + "content": "A Python Package Usage" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.109, + 0.489, + 0.173 + ], + "angle": 0, + "content": "Figure 5 shows a minimal example of running the system pipeline with the ai2-scholar-qa python package and how every component can be extended or modified as the users see fit." + }, + { + "type": "code", + "bbox": [ + 0.118, + 0.18, + 0.488, + 0.409 + ], + "angle": 0, + "content": "from scholarqa rag. reranker. reranker_base import CrossEncoderScores \nfrom scholarqa rag. retrieval import PaperFinderWithReranker \nfrom scholarqa rag. retriever_base import FullTextRetriever \nfrom scholarqa import ScholarQA \nCLAUSE_SONNET_3_7 = \"anthropic/clause-3-7-sonnet-20250219\" \n#Extends the scholarqa rag. retrieval.AAbstractRetriever class \nretriever \\(=\\) FullTextRetriever(n_retrieval=256, n_keyword_shrc=20) \n#Extends the scholarqa rag. reranker. reranker_base.AAbstractReranker class \nreranker \\(=\\) CrossEncoderScores(\"mixedbread-ai/mxbai-erank-large-v1\") \n#Wrapper class for retrieval \npaper_find \\(\\equiv\\) PaperFinderWithReranker(retriever, reranker, n_ rerank=50, context_threshold=0.5) \n#Scholar QA wrapper with the MultiStepQAPipeline integrated \nscholar_qa \\(=\\) ScholarQA(paper_find, llm_model \\(\\coloneqq\\) CLAUSEDSONNET_3_7) \nprint(scholar_qa answer_query(\"Which is the 9th planet in our solar system?\")) \n#Custom MultiStepQAPipeline class/steps \nfrom scholarqa rag. 
multi_step_qapipeline import MultiStepQAPipeline \nmqapipeline \\(=\\) MultiStepQAPipeline(llm_model \\(\\coloneqq\\) CLAUSEDSONNET_3_7) \npaperquotes \\(=\\) mqapipeline step_select Quotes(query,...)#Quote Extraction \nplan \\(=\\) mqapipeline step_clustering(query, paperquotes,...)#Outline and Clustering \n#Section Generation \nresponse \\(=\\) list(mqapipeline generate_iterations.summary(query, paperquotes, plan,...))" + }, + { + "type": "image_caption", + "bbox": [ + 0.154, + 0.419, + 0.448, + 0.433 + ], + "angle": 0, + "content": "Figure 5: ai2-scholar-qa usage example" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.461, + 0.404, + 0.478 + ], + "angle": 0, + "content": "B Document Relevance Prompt" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.485, + 0.49, + 0.532 + ], + "angle": 0, + "content": "We used the following prompt to obtain binary relevance labels, which agreed with human annotators \\(80\\%\\) of the time:" + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.537, + 0.454, + 0.609 + ], + "angle": 0, + "content": "If any part of the following text is relevant to the following question, then return 1, otherwise return 0. Non-english results are not relevant, results which are primarily tables are not relevant." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.621, + 0.461, + 0.638 + ], + "angle": 0, + "content": "C Retrieval Tuning Query Generation" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.646, + 0.49, + 0.822 + ], + "angle": 0, + "content": "Queries for the dev set were obtained from three internal sources of human research questions, and a set of LLM generations. We experimented with several methods for constructing the synthetic LLM questions. Our approach was to generate questions similar to those asked by real users by prompting the LLM to output: (1) a question based on paragraphs retrieved from the corpus, and (2) a \"more general\" version of the first question. 
We only use the \"more general\" set since they were more similar to real user queries." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.833, + 0.436, + 0.85 + ], + "angle": 0, + "content": "D Embedding Models for Retrieval" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.857, + 0.49, + 0.921 + ], + "angle": 0, + "content": "We experimented with multiple top embedding models from the MTEB leader board to optimize retrieval for our system. These are outlined in Table 4." + }, + { + "type": "table", + "bbox": [ + 0.522, + 0.082, + 0.872, + 0.21 + ], + "angle": 0, + "content": "
HuggingFace embedding model name
Snowflake/snowflake-arctic-embed-m5
sentence-transformers/all-mpnet-base-v2 (Reimers and Gurevych, 2019)
avsolatorio/GIST-Embedding-v0 (Solatorio, 2024)
Snowflake/snowflake-arctic-embed-m-long6
intfloat/e5-base-v2 (Wang et al., 2022)
mixedbread-ai/mxbai-embed-large-v1 (Lee et al., 2024)
jinaai/jina-embeddings-v3 (Sturua et al., 2024)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.528, + 0.22, + 0.863, + 0.234 + ], + "angle": 0, + "content": "Table 4: Embedding Models to optimize retrieval" + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.258, + 0.832, + 0.275 + ], + "angle": 0, + "content": "E Retrieval Ensemble Experiments" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.284, + 0.885, + 0.379 + ], + "angle": 0, + "content": "Figure 6 shows results of our ensembling experiments for the full-text retrieval index. SparseEmbed introduces an overhead with minimal performance gains, so we picked an ensemble of embedding similarity and BM25 as our final ranking metric." + }, + { + "type": "image", + "bbox": [ + 0.519, + 0.395, + 0.872, + 0.58 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.596, + 0.884, + 0.655 + ], + "angle": 0, + "content": "Figure 6: Ranking performance for various ensembles with relative size of the index required. Excluding SparseEmbed reduces the index size by \\(20\\%\\) without a significant drop in ranking performance." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.684, + 0.844, + 0.701 + ], + "angle": 0, + "content": "F Prompt for Evaluating Attribution" + }, + { + "type": "text", + "bbox": [ + 0.544, + 0.707, + 0.848, + 0.922 + ], + "angle": 0, + "content": "As an Attribution Validator, your task is to verify whether a given reference can support the given claim. A claim can be either a plain sentence or a question followed by its answer. Specifically, your response should clearly indicate the relationship: Attributable, Contradictory or Extrapolatory. A contradictory error occurs when you can infer that the answer contradicts the fact presented in the context, while an extrapolatory error means that you cannot infer the correctness of the answer based on the information provided in the context. 
Output your response as a json with only a single key \"output\" and a value of one among (\"Attributable\", \"Contradictory\"," + } + ], + [ + { + "type": "text", + "bbox": [ + 0.152, + 0.087, + 0.322, + 0.124 + ], + "angle": 0, + "content": "\"Extrapolatory\"). \nClaim: claim \nReference: ref_excerpt" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.137, + 0.373, + 0.155 + ], + "angle": 0, + "content": "G User Feedback Examples" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.163, + 0.486, + 0.195 + ], + "angle": 0, + "content": "Table 5 lists some examples of the user complaints for Scholar QA reports." + }, + { + "type": "table", + "bbox": [ + 0.117, + 0.206, + 0.498, + 0.379 + ], + "angle": 0, + "content": "
Feedback
The structure is good, but the articles you choose are not from top journals.
The first citation says that rabbits can obtain cholesterol from diet, not rats.
These provide a lot of general information about the topic, but nothing here actually addresses the central question I asked.
The answer did not address the ‘MOBILIZATION’ techniques at all! The answer is wrong because it addressed Exercise therapy!
They address the general setting, but not the specific question I asked.
It’s only analysing on SASAF model, but there are more.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.139, + 0.388, + 0.462, + 0.403 + ], + "angle": 0, + "content": "Table 5: Example Feedback on Research Issues" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.431, + 0.484, + 0.449 + ], + "angle": 0, + "content": "H Progress Updates and Report Sections" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.458, + 0.487, + 0.569 + ], + "angle": 0, + "content": "Figure 7 demonstrates how we display in real-time the progress of the system during generation. This included number of papers and passages the were processed in each step, as well as the outline as it is being generated. Each section appears as soon as it is generated, so users can begin browsing the first sections." + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.579, + 0.486, + 0.807 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.123, + 0.818, + 0.479, + 0.833 + ], + "angle": 0, + "content": "Figure 7: Progress indication and section streaming." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.863, + 0.329, + 0.881 + ], + "angle": 0, + "content": "I Query Type Analysis" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.89, + 0.489, + 0.922 + ], + "angle": 0, + "content": "To analyze the types of questions users are asking, we use an LLM to categorize the queries. The most" + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.102, + 0.88, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.509, + 0.266, + 0.884, + 0.295 + ], + "angle": 0, + "content": "Figure 8: Distribution of different question types submitted to Scholar QA deployed Web application." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.32, + 0.883, + 0.4 + ], + "angle": 0, + "content": "prominent types were comprehensive deep-dive into a specific research topic (15k) and comparative analysis of prior work (5k). 
Other themes such as factoid QA or specific methods, datasets accounted for fewer queries." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.412, + 0.829, + 0.443 + ], + "angle": 0, + "content": "J Generation Results with updated GPT-40" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.454, + 0.884, + 0.583 + ], + "angle": 0, + "content": "Table 6 shows results on ScholarQA-CS with gpt-4o-2024-11-20 as the LLM judge. These results can be contrasted with the first two columns in Table 2 which are obtained with gpt-4o-2024-08-06 as the judge. Even though the absolute scores are inflated compared to Table 2, the relative rankings are about the same with Scholar QA getting the best overall score." + }, + { + "type": "table", + "bbox": [ + 0.522, + 0.594, + 0.871, + 0.729 + ], + "angle": 0, + "content": "
ModelScoreModelScore
RubricsTotalRubricsTotal
LLM Prompting (No Retrieval)QA Systems
Llama 3.1-8B51.848.2SQA-Claude 3.7 S67.367.2
Llama 3.1-70B57.051.2SQA-Claude 3.5 S61.367.1
Claude 3.5 S57.851.3OS-GPT-4o54.959.9
Claude 3.7 S68.460.8PaperQA243.854.1
+Thinking68.358.7Perplex. Sonar DR43.956.0
GPT-4.169.361.8STORM59.264.7
o1-mini69.161.3
o3-mini68.555.9
" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.738, + 0.885, + 0.88 + ], + "angle": 0, + "content": "Table 6: Evaluation results on ScholarQA-CS benchmark with gpt-4o-2024-11-20 as the judge. System responses are either generated by simply prompting LLMs with the questions or by issuing the queries to RAG based QA systems. Expert annotated rubrics only scores are reported in addition to the overall total. The overall best results are highlighted and best results within a category are underlined. SQA: Ai2 Scholar QA, OS: Open Scholar, S: Sonnet, Claude 3.5 S: claude-3-5-sonnet-20241022." + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10861/a4e0028e-483e-4013-a80e-4d616bb12d80_origin.pdf b/data/2025/2504_10xxx/2504.10861/a4e0028e-483e-4013-a80e-4d616bb12d80_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c4b15cfcaa438b3b5fb1f5e20a8987aee42bcdb0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/a4e0028e-483e-4013-a80e-4d616bb12d80_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1c0fef1eb83fc3bb8ae03625c235bc5fe3fcd271d19d3ad9a81bd1184c1611b +size 2383202 diff --git a/data/2025/2504_10xxx/2504.10861/full.md b/data/2025/2504_10xxx/2504.10861/full.md new file mode 100644 index 0000000000000000000000000000000000000000..ef2150ebad1d5afb12446fbd0d809b982c6352c4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/full.md @@ -0,0 +1,318 @@ +# $\diamond$ Ai2 Scholar QA: Organized Literature Synthesis with Attribution + +# Amanpreet Singh* Joseph Chee Chang* Chloe Anastasiades* Dany Haddad* Aakanksha Naik Amber Tanaka Angele Zamarron Cecile Nguyen Jena D. Hwang Jason Dunkleberger Matt Latzke Smita Rao Jaron Lochner Rob Evans Rodney Kinney Daniel S. 
Weld Doug Downey* Sergey Feldman* + +Allen Institute for AI + +{amanpreets, sergey}@allenai.org + +# Abstract + +Retrieval-augmented generation is increasingly effective in answering scientific questions from literature, but many state-of-the-art systems are expensive and closed-source. We introduce Ai2 Scholar QA, a free online scientific question answering application. To facilitate research, we make our entire pipeline public: as a customizable open-source Python package1 and interactive web app, along with paper indexes accessible through public APIs and downloadable datasets. We describe our system in detail and present experiments analyzing its key design decisions. In an evaluation on a recent scientific QA benchmark, we find that Ai2 Scholar QA outperforms competing systems. + +![](images/57dca823c8b7ae7cfba9806c2ff0b51209622a04e30dde754ec83a334852b024.jpg) + +![](images/dbabd07881d038bbba4ce14a7b5fd344bdba0571ccbedb609f2101aa9a734980.jpg) + +![](images/b489c9848f844e933df64a0bc9c22d644c9e8218729b4c01c657c8dd7c272c9f.jpg) + +![](images/949e3a4965caf39889b4cadbd454b66dcf6db9c4bb79bec0c563cbfa0d7df351.jpg) + +qa.allen.ai +allenai/ai2-scholarqa-lib +Demo Video +Python Package + +# 1 Introduction + +Long-form scientific question answering systems use retrieval-augmented generation (RAG) (Lewis et al., 2020) over scientific literature to answer complex questions. These systems produce responses that bring together relevant insights from dozens of papers to help users rapidly learn about a body of scientific work. Examples are OpenScholar (Asai et al., 2024), Elicit, Consensus, and others §5. + +Most of these systems are expensive to use and closed source, relying on models, workflows, and retrieval solutions not shared publicly. These issues create barriers for researchers who wish to study or build on the work. 
In response, we introduce Ai2 Scholar QA, a free-to-use scientific QA system (qa.allen.ai), and share our key components as open source software and public APIs. + +Scholar QA follows a multi-stage pipeline (Figure 1) that starts by querying paper indexes: one + +from Semantic Scholar with over 100M abstracts, and a new index that we introduce in this work containing 11.7M full-text scientific papers. The pipeline then re-ranks the retrieved passages with a cross-encoder, and finally prompts a Large Language Model (LLM) to filter, cluster, and synthesize the passages into an answer. The final answer is presented to the user in a report with expandable sections of prose, bulleted lists, and tables. Claims in the answer are supported by citations, which can be clicked to reveal the cited paper's title and authors (with links to their corresponding Semantic Scholar pages), and in many cases relevant excerpt(s) from the paper, allowing for quick verification of the claim. + +The system is based on open source code, enabling the community to reproduce and build on it. We release the code for our pipeline, prompting workflow and Web application. The retrieval indexes, including the new full text search index, are available as Semantic Scholar APIs and dataset downloads, and are continually updated with new articles (Kinney et al., 2023). Together, these resources can be combined with any generative LLM API to power a complete long-form scientific QA application. Our production system currently uses Anthropic's Claude 3.7 (Anthropic, 2024). + +We present analyses that justify key design decisions in our architecture in §4. Our choice of retrieval models and configuration is informed by evaluation over a collection of real and synthetic user queries and accompanying passages judged for relevance by a LLM, both of which we release publicly. 
We compare Scholar QA's answers against several baselines, demonstrating that it achieves state-of-the-art performance on the ScholarQA-CS benchmark (Asai et al., 2024). Finally, we discuss the reception of Scholar QA by users. The strong majority $(85\%)$ of user feedback is positive, and the reported issues suggest important improvements for future work. + +![](images/b5df53f86fb06f99e0f4351fefeea2efe4c0a077e930e6c8769c098085df09e0.jpg) +Figure 1: Scholar QA Pipeline Overview + +# 2 Pipeline + +The Scholar QA architecture (Figure 1) has three primary components: 1) retrieval to identify relevant passages from a corpus of scientific literature; 2) a neural cross-encoder that re-ranks the passages to select the most relevant top-k; and 3) multi-step LLM generation to process the passages into a comprehensive report. Next, we describe each component of the pipeline in detail. + +Query Validation. Prior to processing a query, we employ OpenAI's omni-moderation-latest $^2$ model for safeguarding against potentially harmful content and return appropriate error messages. + +# 2.1 Retrieval + +We use the Semantic Scholar API (Kinney et al., 2023) for retrieval, specifically its endpoint for keyword search over paper abstracts, and our new endpoint for querying snippets from open-access papers. A query decomposer re-formulates the user query for each endpoint and retrieves up to 256 snippets and 20 abstracts. These texts are referred to as "passages" below. + +Query Decomposer. The two retrieval endpoints differ in their effective query formats (one targets keyword and the other semantic queries) and filtering of results based on the user's preferences for paper metadata (paper year, venue, field of study). In our query decomposition step, an LLM is prompted to re-format the user query into paraphrases appropriate for each endpoint, and to extract the user's requested settings for the metadata filters. We use the outputs of this step for retrieval. + +Search APIs. 
The Semantic Scholar keyword search API is described in Kinney et al. (2023). We introduce a new /snippet/search endpoint, which searches over a corpus of passages extracted from S2ORC (Lo et al., 2020), loaded into a Vespa cluster with papers and passages. Papers include metadata for filtering. Passages are derived from a paper's title, abstract, or body and can be filtered at the paper level. The index includes 11.7M full-text papers across the fields of study listed here, and a total of 285.6M passages.

Each passage is limited to 480 tokens and truncated at sentence and section boundaries where possible, having an overlap of one sentence (up to 64 tokens) with the preceding and following passages. Passage text is embedded with mxbai-embed-large-v1 (Lee et al., 2024) with binary quantization, and placed into a dense (approximate nearest neighbor) index, as well as a traditional sparse keyword index.

We first retrieve a union of embedding and keyword-based matches, applying any specified filters. The filtered results are ranked with a weighted sum of embedding similarity and bm25 scores.

# 2.2 Reranking

The passages obtained from the retrieval step are subsequently passed to a neural re-ranker and the top 50 results are retained. The re-ranker is a cross-encoder that encodes both the query and a candidate document simultaneously and outputs a relevance score used to rank the documents. We selected mxbai-rerank-large-v1 (Shakir et al., 2024) based on the results in §4.2 and host it on Modal with a single NVIDIA L40S GPU.

# 2.3 Multi-step Generation

The generation phase employs a three-step approach: first, the retrieved passages are processed to extract more precise quotes relevant to the query; second, the quotes are thematically clustered into separate sections appropriate for the answer; finally, a controlled generation process composes the final report one section at a time, synthesizing the quotes assigned to that section.

Quote extraction. Passages from the retrieval stage can be lengthy and may contain extraneous information not useful for answering the user query (Asai et al., 2023). The quote extraction stage aims to select only the most relevant quotes from the passages to improve the precision of the answer.

We instruct an LLM to extract verbatim quotes that directly contribute to answering the query (Slobodkin et al., 2024). As input to the extraction, we gather all passages from the re-ranker for a given paper, and concatenate these to the abstract of the paper. This aggregation helps create a richer context conducive to extracting relevant quotes. The LLM processes each paper's content independently and returns the selected quotes separated by ellipses. If the entire paper context is deemed irrelevant, it is discarded from further processing.

Answer Outline and Clustering. For generating a comprehensive research report, the effective organization of reference materials is essential for its overall coherence. We propose a thematic outline framework where the answer is divided into sections representing topics, and the reference quotes are assigned to these topics. This mapping allows the system to selectively focus only on the pertinent subset of quotes when synthesizing a section.

First, the LLM is instructed to generate a list of themes in logical order and the appropriate synthesis format for each theme, independent of the quotes from the previous step. The first section is always an introduction or background to provide the user the basics for understanding the answer. The format of each section can be either a paragraph or a bulleted list, serving different information needs. Paragraphs convey nuanced summaries from multiple papers, while bulleted lists enumerate related papers (e.g., models, datasets, or interactive systems). These lists are also the catalyst for generating the comparison tables (see §2.3). 
Following this, the sections are assigned 0 or more quotes. In case no quote is assigned to a section, it is generated completely from the LLM weights. + +Report Generation. With the answer outline in place, each section of the report is synthesized serially conditioned on the query, reference sources, and the sections prior to it. The LLM is also instructed to generate a TLDR for each section. The references are either the quotes assigned to the section or abstracts of papers that are cited within these quotes. This citation following method allows the LLM to condition on and cite foundational sources which are not uncovered in retrieval. The LLM is instructed to cite the sources for each claim in the generated section text and cite generations from its parameters as LLM Memory. + +Paper Comparison Table Generation. Since bulleted list sections typically include closely related papers (e.g., different datasets), we additionally generate tables that compare and contrast all papers cited in that section using common aspects (e.g., size and annotation method). This pipeline is detailed in Newman et al. (2024). At a high level, the inputs are the query to Scholar QA, the section title, and the abstracts of all papers cited in the section. An LLM first produces a set of common aspects (columns) to compare papers (rows). Each cell (paper-aspect pair) is filled with a value using the full-text of the paper. Finally, as not all aspects are applicable to every paper (e.g., one paper might not be about a dataset), we filter out columns and rows with a high proportion of missing values. Figure 3 [A] shows an expanded table in Scholar QA where related papers from a section are compared across a set of common aspects ([B]). + +# 3 Scholar QA: Interface and Source Code + +Scholar QA is open-sourced as an extensible Python package (ai2-scholar-qa) and a Typescript and React-based interactive web application. 
The LLM functionality of Scholar QA is implemented with litellm, which supports swapping a variety of models using your own keys. Thus, the community can build upon Scholar QA and easily visualize the results (examples in Appendix A). Below we describe the user experience of the demo.3 + +Progress and Section Streaming. High system latency can hinder usability. On average, Scholar QA produces a full report in 2.5 minutes (N=500, $\sigma = 70\mathrm{s}$ ), which is comparable to modern LLM-based research tools. To further improve usability, the following designs were used: 1) Displaying detailed real-time progress of the system (Nielsen, 1994) so users can examine the number of papers, passages, and sections being processed. 2) Presenting each section as soon as it is generated, so users can begin browsing the first section in 50 seconds (N=500, $\sigma = 24\mathrm{s}$ ) post issuing a query (Appendix H). + +Expandable Sections. By default, sections are collapsed showing only their titles, TLDR summaries, and number of cited sources. This gives users a gist of the information included in the report (Figure 2 [A]). Users can then click on the title of a section they wish to read to expand it ([B]). + +3Our production system has a few additional features like downloadable reports, login and links to other Ai2 systems. + +![](images/c9a8ba26e2c2e5a83296e69aea82c8dcfe28e652909eb1832eb214e2af5358ec.jpg) +Figure 2: Multi-section [B] report generated by Scholar QA. References are linked to supporting excerpts [C]. Thumbs and free text feedback are collected for the full report [A], and also for each section and inline table. + +References and Evidence Excerpts. To verify the claims in the report, users can click on the inline citations (Figure 2 [C]) or the pink excerpt icon in the inline table cells (Figure 3 [C]) to bring up a popup paper card. 
From the paper card, they can see the relevant excerpts used during the generation or click on the title to open the paper directly. + +User Feedback Collection. We collect thumbs up/down or textual feedback for the whole report (Figure 2 [A]) and at each section and inline table. + +# 4 Evaluation + +# 4.1 Retrieval + +We tuned our retrieval setup by optimizing ranking over a dev set of 500 synthetic queries (see Appendix C) and the top 1000 passages for each based on GIST embedding distance (Solatorio, 2024). We generated binary relevance labels with gpt-4-turbo (see Appendix B for the prompt), which were found to have $80\%$ agreement with + +![](images/b8d80b8b7ef2cc0d21f8dccb37dcca884443d727a60e15f37512e39fd2a5eff8.jpg) +Figure 3: Inline tables compare papers [A] with common aspects [B] with values linked to supporting excerpts from the papers [C]. + +![](images/81b8047cfb6dc431ad8aec3f0c23b80473c05396747e736928043641fcddda80.jpg) +Figure 4: Embedding ranking performance for various compression methods and matryoshka cutoffs. The $x$ -axis indicates the size of the vector index based relative to using int8 quantization and the full embedding size. The red circle indicates the selected configuration. Embedding size is notated next to each point. + +human annotators on a sample of 100 queries. + +Pipeline Tuning. We optimized several aspects of retrieval over this dev set: embedding model selection and quantization method for it, the components and weights in the final ensemble, and (when relevant) the target Matryoshka dimension for the embeddings (Kusupati et al., 2024). + +We experimented with medium sized embedding models based on top performers on the retriever and ranking tasks of the MTEB (Muennighoff et al., 2022) leaderboard on HuggingFace. Table 4 in Appendix D lists our candidate models. The mxbai-embed-large-v1 (Lee et al., 2024) embeddings performed best over our dev set. 
Figure 4 validates our choice of quantization method and target Matryoshka dimension for these embeddings. We chose unary quantization with no Matryoshka truncation, (indicated by a red circle on the plot) since it satisfied our storage constraints without a large drop in performance. We experimented with assembling SparseEmbed (Kong et al., 2023), embedding cosine similarity, BM25, and chose the latter two (weight split of (0.6, 0.4) respectively) based on the results (See Appendix E). The BM25 scores are normalized with min-max scaling before computing the ensemble score. + +# 4.2 Reranking + +We chose the re-ranker based on evaluation over a mixture of real scientific questions from the Stack Exchange Computer Science, Math, and Statistics communities, real research queries written by the authors and their colleagues, and synthetic ones generated by fine-tuning GPT-4o-mini over questions from the ScholarQA-CS dataset (Asai et al., + +
Model (Size)Latency (sec/query)nDCG @ 10mRR
bge-reranker-v2-m3 (568M)0.140.9130.973
akariasai/ranker_large (568M)0.140.9060.970
jina-reranker-v2-base (278M)0.060.9070.972
mxbai-rerank-large-v1 (435M)0.460.9270.975
mxbai-rerank-base-v1 (184M)0.190.9190.974
mxbai-rerank-xsmall-v1 (70M)0.110.9110.970
mxbai-rerank-base-v2 (0.5B)0.400.9180.974
mxbai-rerank-large-v2 (1.5B)0.700.9110.975
+ +Table 1: Cross encoder re-ranker results on our dataset of GPT-4o labels. The best results are highlighted. + +2024). For a given query, passages are retrieved and then awarded a relevance score in the range 0-3 with GPT-4o. We experiment with multiple state-of-the-art re-rankers (Chen et al., 2024; Shakir et al., 2024; Asai et al., 2024), and, as shown in Table 2, mxbai-erank-large-v1 gives the best results across the board (even outperforming its v2 model on our task). To reduce latency for deployment, we implemented optimizations like Pytorch model compilation. We release the evaluation data consisting of 2,426 queries and 225,618 passages. + +# 4.3 Generation + +We evaluate the final output of Scholar QA on the ScholarQA-CS dataset which consists of expert-annotated rubrics for 100 Computer Science research questions. The question-specific expert rubrics account for $60\%$ of the final score, while the rest is computed based on global metrics of length, expertise and citations. We use GPT-4o (Hurst et al., 2024) as a judge with the utility provided by Asai et al. (2024) for automatic evaluation and compare against several baselines. + +As shown in Table 2, our system outperforms popular LLMs: Llama 3.1 (Dubey et al., 2024), GPT 4.1 and Claude Sonnet 3.7 (Anthropic, 2024). It even outperforms reasoning models such as Sonnet 3.7 Thinking (Anthropic, 2025), o1-mini (OpenAI, 2024b) and o3-mini (Zhang et al., 2025) overall on the Scholar QA-CS benchmark. This setup lacks any retrieval so the models generate the responses completely from parametric memory. The benchmark rewards attribution and supporting evidence as a measure of trust in the system, so these models score lower overall. The reasoning based models perform better than our system on the rubrics score, which suggests that they may be superior backbones for our system. However, due to the additional reasoning tokens, these models are more expensive and also significantly increase latency. 
+ +For contemporary QA systems, we compare against OpenScholar with GPT-4o $^{4}$ , PaperQA2 (Skarlinski et al., 2024), Perplexity's Sonar Deep Research and STORM (Shao et al., 2024a). PaperQA2 did not release their retrieval corpus, so we substitute it with our retrieval pipeline for a fair comparison. Scholar QA obtains the best scores both on rubrics and overall, with the variant using Claude 3.7 Sonnet as the backbone scoring 2.4 points higher than STORM. For these QA systems, we also evaluate the attribution quality based on ALCE (Gao et al., 2023), which proposes entailment between claims and evidence to compute citation precision and recall. Again, we use GPT-4o as a judge to predict entailment (See Appendix F for the prompt) and treat each sentence in a response as a claim. Even with a report spanning multiple sections where all the sentences might not be cited, Scholar QA comes out far ahead of the other QA systems. Due to a lack of retrieval, this evaluation was not conducted when the LLMs are simply prompted to generate a response from memory. An interesting discovery from our analysis was that with an updated version of GPT-4o (i.e. gpt-4o-2024-11-20) as the judge, the scores are inflated compared to using gpt-4o-2024-08-06, even though the relative rankings are consistent (See Appendix J). For parity with Asai et al. (2023), we report the rubrics and citation scores with the older and newer model as the judge, respectively. + +During our initial experiments, we restricted ScholarQA to only summarize the insights conditioned on the quotes extracted from retrieved passages. However, in cases where the retrieved passages were not relevant enough, the system failed to answer the question in favor of just discussing the information in the quotes. Moreover, for over $30\%$ of instances in ScholarQA-CS, the rubrics require background information, even though the question might not. 
So, we updated our system LLM prompts to - a) Generate section text from memory if there is a lack of relevant retrieved passages and cite as LLM Memory and b) generate the first section as a background or introduction for the rest of the answer. The results reported here are obtained post these changes. + +To finalize the backbone LLM for the production web application we conducted an anonymized pair + +
ModelScoreModelScore
RubricsTotalRubricsTotalCite
LLM Prompt (No Retrieval)QA Systems
Llama 3.1-8B48.847.3SQA-Claude 3.7 S58.061.948.1
Llama 3.1-70B52.448.6SQA-Claude 3.5 S52.661.352.1
Claude 3.5 S50.446.6OS-GPT-4o49.353.525.9
Claude 3.7 S61.555.9PaperQA238.751.425.3
+Thinking62.755.7Perplex. Sonar DR38.752.825.2
GPT-4.163.256.2STORM54.259.540.2
o1-mini62.355.5
o3-mini60.650.2
+ +Table 2: Evaluation results on ScholarQA-CS benchmark. System responses are either generated by simply prompting LLMs with the questions or by issuing the queries to RAG based QA systems. Expert annotated rubrics only scores are reported in addition to the overall total. The overall best results are highlighted and best results within a category are underlined. SQA: Ai2 Scholar QA, OS: Open Scholar, S: Sonnet, Claude 3.5 S: claude-3-5-sonnet-20241022. + +wise comparison among the authors of this work. We compare Claude 3.7 against 3.5. Out of 18 comparisons, Claude 3.7 Sonnet was the overwhelming favorite with 17 wins, reinforcing our hypothesis that (with no other changes) our system improves with newer and better backbone LLMs. + +# 4.4 Real-world Usage and User Feedback + +We have publicly deployed Scholar QA for 9 weeks, and received $30.2\mathrm{k}$ questions from 8,219 unique visitors. On average, each response is about $2.4\mathrm{k}$ words and costs $0.50 to produce. We observed 1,075 monthly repeated users who had issued queries on two distinct days over the course of a 30 day window. We analyze the user query types and the most prominent themes were deep-dive into specific research topics (15k) and comparative analysis of specific prior work (5k) (detailed distribution in Appendix I). A total of 2,433 thumbs feedback were submitted (Figure 2 [A]) and $85\%$ were positive. These suggests real-world users benefited from using Scholar QA. + +For insight into the failure modes, we manually examined the 383 instances of neutral/negative freeform feedback. Table 3 lists the feedback types we identified along with their counts as of May 2025 (example feedback in Appendix G). We hypothesize that follow-up questions may help address insufficient answer detail and cases with a lack of retrieved documents, while improved retrieval may help address incomplete or incorrect references and off-topic responses. + +
CategoryCount
Incorrect or Missing References126
Off-topic or Misunderstood Query113
Request for More Detail or Specificity289
General Feedback on Quality149
Language or Format Issues78
+ +Table 3: Feedback Categories and Counts + +# 5 Related Work + +Scientific Question Answering. Answering scientific questions involves navigating scholarly sources and accurately retrieving and synthesizing them. Recently, OpenScholar (Asai et al., 2024) introduced a retrieval-augmented model designed explicitly for scientific literature synthesis with citation-supported responses with significant improvement in accuracy and reduced citation hallucination. Scholar QA extends its capabilities by leveraging the latest state-of-the-art LLMs and an open source generation pipeline that filters literature into precise quotes and produces thematically organized and detailed answers. STORM (Shao et al., 2024b) synthesizes comprehensive, Wikipedia-like articles, a distinct task from long-form scientific question answering. Other works have focused on literature review synthesis: LitLLM (Agarwal et al., 2024), which like Scholar QA uses a structured planning-and-generation pipeline similar, and SurveyForge (Yan et al., 2025), which outlines heuristics before generation. Their code was not available at the time of our evaluation. Zhou et al. (2025) present a survey categorizing AI-driven research support systems across various stages of the scientific process, including literature synthesis. + +Commercial Tools for Scientific QA. Commercial RAG tools have emerged to facilitate research specifically tailored for scientific literature, such as Consensus (Consensus, 2024), which synthesizes findings from research papers, Scite (Scite, 2024), which evaluates claims by analyzing citation contexts, and Elicit (Elicit, 2024), which supports structured scientific literature reviews. Other general-purpose tools also support scientific inquiries: Perplexity (Perplexity, 2024), You.com (You.com, 2024), OpenAI Deep Research (OpenAI, 2024a) and Gemini Deep Research (DeepMind, 2024). 
Although these platforms leverage advanced retrieval and generation capabilities to facilitate literature reviews and deliver rapid insights, + +they can be too expensive for widespread academic use and typically lack transparency regarding their pipelines. In contrast, Scholar QA is free with open sourced code and access to search APIs that enable the research community to build upon it. + +# 6 Conclusion + +We present Ai2 Scholar QA, a freely-available longform literature synthesis system that generates reports for complex scientific questions. We release key components as open source code and public APIs, and report experiments analyzing design decisions and demonstrate state-of-the-art results. + +# Limitations + +Supplementing the user feedback discussed in subsection 4.4, we would like to outline some limitations of our system and evaluation and our plans to mitigate them as part of future work: + +(i) Ai2 Scholar QA uses proprietary and closed-source LLM as the backbone for our production pipeline. As shown in Table 2, open source models lag behind the proprietary models in our evaluation. However, we are actively experimenting with open-sourced LLMs to replace the closed ones partially or completely in the pipeline. The open-sourced models will be specifically trained to do well on long-form scientific question answering and each of the sub-tasks in our multi-step generation. Further, our code is open-sourced and can easily be used with potentially any available LLM api provider supported by litellm. + +(ii) We evaluate the answers generated by Scholar QA and compare against other systems on ScholarQA-CS dataset in subsection 4.3. Even though the answer rubrics are collected via human annotation, the evaluation is only limited to questions in the Computer Science domain and further relies completely on an LLM as the evaluator. In ongoing work, we are investigating more accurate benchmarks for evaluating long form scientific answers. 
Our approach uses real queries posed by users to Scholar QA, and human preference labels over answers from multiple systems in not just Computer Science, but Biomedicine and other scientific domains. These labels can serve not only for evaluation, but also as training signals for models. + +# Acknowledgments + +We would like to thank the anonymous reviewers for helpful comments, suggestions and feedback on the manuscript. We would also like to acknowledge the Ai2 ScholarQA users for providing constructive feedback that helped us improve the system. Finally, we thank David Albright for helping with the demo video, the Ai2 communications team for their help with user outreach, and Ai2 engineers and researchers for their help with user testing before launch. + +# References + +Shubham Agarwal, Gaurav Sahu, Abhay Puri, Issam Hadj Laradji, Krishnamurthy Dj Dvijotham, Jason Stanley, Laurent Charlin, and Christopher Pal. 2024. Litllms, llms for literature review: Are we there yet? +Anthropic. 2024. The claude 3 model family: Opus, sonnet, haiku. +Anthropic. 2025. Claude 3.7 sonnet system card. +Akari Asai, Jacqueline He, Rulin Shao, Weijia Shi, Amanpreet Singh, Joseph Chee Chang, Kyle Lo, Luca Soldaini, Sergey Feldman, Mike D'Arcy, David Wadden, Matt Latzke, Minyang Tian, Pan Ji, Shengyan Liu, Hao Tong, Bohao Wu, Yanyu Xiong, Luke S. Zettlemoyer, and 6 others. 2024. Openscholar: Synthesizing scientific literature with retrieval-augmented lms. ArXiv, abs/2411.14199. +Akari Asai, Zeqiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. 2023. Self-rag: Learning to retrieve, generate, and critique through self-reflection. ArXiv, abs/2310.11511. +Jianlv Chen, Shitao Xiao, Peitian Zhang, Kun Luo, Defu Lian, and Zheng Liu. 2024. Bge m3-embedding: Multi-lingual, multi-functionality, multi-granularity text embeddings through self-knowledge distillation. Preprint, arXiv:2402.03216. +Consensus. 2024. Consensus - ai for research. Accessed: 2025-03-28. +Google DeepMind. 2024. 
Gemini - deep research mode. Accessed: 2025-03-28. +Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, Anirudh Goyal, Anthony S. Hartshorn, Aobo Yang, Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, Arun Rao, Aston Zhang, and 510 others. 2024. The llama 3 herd of models. ArXiv, abs/2407.21783. +Elicit. 2024. Elicit - the ai research assistant. Accessed: 2025-03-28. + +Tianyu Gao, Howard Yen, Jiatong Yu, and Danqi Chen. 2023. Enabling large language models to generate text with citations. In Conference on Empirical Methods in Natural Language Processing. +OpenAI Aaron Hurst, Adam Lerer, Adam P. Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, Aleksander Mkadry, Alex Baker-Whitcomb, Alex Beutel, Alex Borzunov, Alex Carney, Alex Chow, Alexander Kirillov, Alex Nichol, Alex Paino, and 397 others. 2024. Gpt-4o system card. ArXiv, abs/2410.21276. +Rodney Michael Kinney, Chloe Anastasiades, Russell Authur, Iz Beltagy, Jonathan Bragg, Alexandra Buraczynski, Isabel Cachola, Stefan Candra, Yoganand Chandrasekhar, Arman Cohan, Miles Crawford, Doug Downey, Jason Dunkelberger, Oren Etzioni, Rob Evans, Sergey Feldman, Joseph Gorney, David W. Graham, F.Q. Hu, and 29 others. 2023. The semantic scholar open data platform. *ArXiv*, abs/2301.10140. +Weize Kong, Jeffrey M. Dudek, Cheng Li, Mingyang Zhang, and Michael Bendersky. 2023. Sparseembed: Learning sparse lexical representations with contextual embeddings for retrieval. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, page 2399-2403. ACM. +Aditya Kusupati, Gantavya Bhatt, Aniket Rege, Matthew Wallingford, Aditya Sinha, Vivek Ramanujan, William Howard-Snyder, Kaifeng Chen, Sham Kakade, Prateek Jain, and Ali Farhadi. 2024. Matryoshka representation learning. Preprint, arXiv:2205.13147. 
+Sean Lee, Aamir Shakir, Darius Koenig, and Julius Lipp. 2024. Open source strikes bread - new fluffy embeddings model. +Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Kuttler, Mike Lewis, Wen tau Yih, Tim Rocktäschel, Sebastian Riedel, and Douwe Kiela. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. ArXiv, abs/2005.11401. +Kyle Lo, Lucy Lu Wang, Mark Neumann, Rodney Michael Kinney, and Daniel S. Weld. 2020. S2orc: The semantic scholar open research corpus. In Annual Meeting of the Association for Computational Linguistics. +Niklas Muennighoff, Nouamane Tazi, Loic Magne, and Nils Reimers. 2022. Mteb: Massive text embedding benchmark. In Conference of the European Chapter of the Association for Computational Linguistics. +Benjamin Newman, Yoonjoo Lee, Aakanksha Naik, Pao Siangliulue, Raymond Fok, Juho Kim, Daniel S. Weld, Joseph Chee Chang, and Kyle Lo. 2024. Arxiv digestables: Synthesizing scientific literature into tables using language models. In Conference on Empirical Methods in Natural Language Processing. + +Jakob Nielsen. 1994. Enhancing the explanatory power of usability heuristics. In Proceedings of the SIGCHI conference on Human Factors in Computing Systems, pages 152-158. +OpenAI. 2024a. Chatgpt - deep research mode. Accessed: 2025-03-28. +OpenAI. 2024b. Openai o1 system card. +Perplexity. 2024. Perplexity ai - ask anything. Accessed: 2025-03-28. +Nils Reimers and Iryna Gurevych. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics. +Scite. 2024. Scite - smart citations for research. Accessed: 2025-03-28. +Aamir Shakir, Darius Koenig, Julius Lipp, and Sean Lee. 2024. Boost your search with the crispy mixedbread rerank models. +Yijia Shao, Yucheng Jiang, Theodore Kanell, Peter Xu, Omar Khattab, and Monica Lam. 2024a. 
Assisting in writing Wikipedia-like articles from scratch with large language models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 6252-6278, Mexico City, Mexico. Association for Computational Linguistics. +Yijia Shao, Yucheng Jiang, Theodore A. Kanell, Peter Xu, Omar Khattab, and Monica S. Lam. 2024b. Assisting in writing wikipedia-like articles from scratch with large language models. Preprint, arXiv:2402.14207. +Michael D. Skarlinski, Sam Cox, Jon M. Laurent, James D. Braza, Michaela M. Hinks, Michael J Hammerling, Manvitha Ponnapati, Samuel G. Rodriques, and Andrew D. White. 2024. Language agents achieve superhuman synthesis of scientific knowledge. ArXiv, abs/2409.13740. +Aviv Slobodkin, Eran Hirsch, Arie Cattan, Tal Schuster, and Ido Dagan. 2024. Attribute first, then generate: Locally-attributable grounded text generation. In Annual Meeting of the Association for Computational Linguistics. +Aivin V. Solatorio. 2024. Gistembed: Guided in-sample selection of training negatives for text embedding fine-tuning. ArXiv, abs/2402.16829. +Saba Sturua, Isabelle Mohr, Mohammad Kalim Akram, Michael Gunther, Bo Wang, Markus Kimmel, Feng Wang, Georgios Mastrupas, Andreas Koukounas, Andreas Koukounas, Nan Wang, and Han Xiao. 2024. jina-embeddings-v3: Multilingual embeddings with task lora. Preprint, arXiv:2409.10173. + +Liang Wang, Nan Yang, Xiaolong Huang, Binxing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, and Furu Wei. 2022. Text embeddings by weakly-supervised contrastive pre-training. arXiv preprint arXiv:2212.03533. +Xiangchao Yan, Shiyang Feng, Jiakang Yuan, Renqiu Xia, Bin Wang, Bo Zhang, and Lei Bai. 2025. Surveyforge: On the outline heuristics, memory-driven generation, and multi-dimensional evaluation for automated survey writing. +You.com. 2024. You.com - personalized ai search. Accessed: 2025-03-28. 
+Brian Zhang, Eric Mitchell, Hongyu Ren, Kevin Lu, Max Schwarzer, Michelle Pokrass, Shengjia Zhao, Ted Sanders, Adam Kalai, Alexandre Passos, Benjamin Sokolowsky, Elaine Ya Le, Erik Ritter, Hao Sheng, Hanson Wang, Ilya Kostrikov, James Lee, Johannes Ferstad, Michael Lampe, and 93 others. 2025. Openai o3-mini system card. +Zekun Zhou, Xiaocheng Feng, Lei Huang, Xiachong Feng, Ziyun Song, Ruihan Chen, Liang Zhao, Weitao Ma, Yuxuan Gu, Baoxin Wang, Dayong Wu, Guoping Hu, Ting Liu, and Bing Qin. 2025. From hypothesis to publication: A comprehensive survey of ai-driven research support systems. + +# A Python Package Usage + +Figure 5 shows a minimal example of running the system pipeline with the ai2-scholar-qa python package and how every component can be extended or modified as the users see fit. + +```python +from scholarqa rag. reranker. reranker_base import CrossEncoderScores +from scholarqa rag. retrieval import PaperFinderWithReranker +from scholarqa rag. retriever_base import FullTextRetriever +from scholarqa import ScholarQA +CLAUSE_SONNET_3_7 = "anthropic/clause-3-7-sonnet-20250219" +#Extends the scholarqa rag. retrieval.AAbstractRetriever class +retriever $=$ FullTextRetriever(n_retrieval=256, n_keyword_shrc=20) +#Extends the scholarqa rag. reranker. reranker_base.AAbstractReranker class +reranker $=$ CrossEncoderScores("mixedbread-ai/mxbai-erank-large-v1") +#Wrapper class for retrieval +paper_find $\equiv$ PaperFinderWithReranker(retriever, reranker, n_ rerank=50, context_threshold=0.5) +#Scholar QA wrapper with the MultiStepQAPipeline integrated +scholar_qa $=$ ScholarQA(paper_find, llm_model $\coloneqq$ CLAUSEDSONNET_3_7) +print(scholar_qa answer_query("Which is the 9th planet in our solar system?")) +#Custom MultiStepQAPipeline class/steps +from scholarqa rag. 
multi_step_qapipeline import MultiStepQAPipeline +mqapipeline $=$ MultiStepQAPipeline(llm_model $\coloneqq$ CLAUSEDSONNET_3_7) +paperquotes $=$ mqapipeline step_select Quotes(query,...)#Quote Extraction +plan $=$ mqapipeline step_clustering(query, paperquotes,...)#Outline and Clustering +#Section Generation +response $=$ list(mqapipeline generate_iterations.summary(query, paperquotes, plan,...)) +``` + +# B Document Relevance Prompt + +We used the following prompt to obtain binary relevance labels, which agreed with human annotators $80\%$ of the time: + +If any part of the following text is relevant to the following question, then return 1, otherwise return 0. Non-english results are not relevant, results which are primarily tables are not relevant. + +# C Retrieval Tuning Query Generation + +Queries for the dev set were obtained from three internal sources of human research questions, and a set of LLM generations. We experimented with several methods for constructing the synthetic LLM questions. Our approach was to generate questions similar to those asked by real users by prompting the LLM to output: (1) a question based on paragraphs retrieved from the corpus, and (2) a "more general" version of the first question. We only use the "more general" set since they were more similar to real user queries. + +# D Embedding Models for Retrieval + +We experimented with multiple top embedding models from the MTEB leader board to optimize retrieval for our system. These are outlined in Table 4. + +
HuggingFace embedding model name
Snowflake/snowflake-arctic-embed-m5
sentence-transformers/all-mpnet-base-v2 (Reimers and Gurevych, 2019)
avsolatorio/GIST-Embedding-v0 (Solatorio, 2024)
Snowflake/snowflake-arctic-embed-m-long6
intfloat/e5-base-v2 (Wang et al., 2022)
mixedbread-ai/mxbai-embed-large-v1 (Lee et al., 2024)
jinaai/jina-embeddings-v3 (Sturua et al., 2024)
+ +Table 4: Embedding Models to optimize retrieval + +# E Retrieval Ensemble Experiments + +Figure 6 shows results of our ensembling experiments for the full-text retrieval index. SparseEmbed introduces an overhead with minimal performance gains, so we picked an ensemble of embedding similarity and BM25 as our final ranking metric. + +![](images/01163041117b4702addfc2828f34282b7c07a400177ffc2748176f9968e4d482.jpg) +Figure 5: ai2-scholar-qa usage example +Figure 6: Ranking performance for various ensembles with relative size of the index required. Excluding SparseEmbed reduces the index size by $20\%$ without a significant drop in ranking performance. + +# F Prompt for Evaluating Attribution + +As an Attribution Validator, your task is to verify whether a given reference can support the given claim. A claim can be either a plain sentence or a question followed by its answer. Specifically, your response should clearly indicate the relationship: Attributable, Contradictory or Extrapolatory. A contradictory error occurs when you can infer that the answer contradicts the fact presented in the context, while an extrapolatory error means that you cannot infer the correctness of the answer based on the information provided in the context. Output your response as a json with only a single key "output" and a value of one among ("Attributable", "Contradictory", + +"Extrapolatory"). +Claim: claim +Reference: ref_excerpt + +# G User Feedback Examples + +Table 5 lists some examples of the user complaints for Scholar QA reports. + +
Feedback
The structure is good, but the articles you choose are not from top journals.
The first citation says that rabbits can obtain cholesterol from diet, not rats.
These provide a lot of general information about the topic, but nothing here actually addresses the central question I asked.
The answer did not address the ‘MOBILIZATION’ techniques at all! The answer is wrong because it addressed Exercise therapy!
They address the general setting, but not the specific question I asked.
It’s only analysing on SASAF model, but there are more.
+ +# H Progress Updates and Report Sections + +Figure 7 demonstrates how we display in real-time the progress of the system during generation. This includes the number of papers and passages that were processed in each step, as well as the outline as it is being generated. Each section appears as soon as it is generated, so users can begin browsing the first sections. + +![](images/ce3b077c2ebdcf5f7466cd6d35673d3f0ab2172dcdb52e8846aace40f300e321.jpg) +Figure 7: Progress indication and section streaming. + +# I Query Type Analysis + +To analyze the types of questions users are asking, we use an LLM to categorize the queries. The most + +![](images/daf247f1ede05b107084c0cddaa32843207f0c0fac484c5bea2ddc5880c2935f.jpg) +Figure 8: Distribution of different question types submitted to Scholar QA deployed Web application. + +prominent types were comprehensive deep-dive into a specific research topic (15k) and comparative analysis of prior work (5k). Other themes such as factoid QA or specific methods, datasets accounted for fewer queries. + +# J Generation Results with updated GPT-4o + +Table 6 shows results on ScholarQA-CS with gpt-4o-2024-11-20 as the LLM judge. These results can be contrasted with the first two columns in Table 2 which are obtained with gpt-4o-2024-08-06 as the judge. Even though the absolute scores are inflated compared to Table 2, the relative rankings are about the same with Scholar QA getting the best overall score. + +Table 5: Example Feedback on Research Issues + +
ModelScoreModelScore
RubricsTotalRubricsTotal
LLM Prompting (No Retrieval)QA Systems
Llama 3.1-8B51.848.2SQA-Claude 3.7 S67.367.2
Llama 3.1-70B57.051.2SQA-Claude 3.5 S61.367.1
Claude 3.5 S57.851.3OS-GPT-4o54.959.9
Claude 3.7 S68.460.8PaperQA243.854.1
+Thinking68.358.7Perplex. Sonar DR43.956.0
GPT-4.169.361.8STORM59.264.7
o1-mini69.161.3
o3-mini68.555.9
+ +Table 6: Evaluation results on ScholarQA-CS benchmark with gpt-4o-2024-11-20 as the judge. System responses are either generated by simply prompting LLMs with the questions or by issuing the queries to RAG based QA systems. Expert annotated rubrics only scores are reported in addition to the overall total. The overall best results are highlighted and best results within a category are underlined. SQA: Ai2 Scholar QA, OS: Open Scholar, S: Sonnet, Claude 3.5 S: claude-3-5-sonnet-20241022. \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10861/images/01163041117b4702addfc2828f34282b7c07a400177ffc2748176f9968e4d482.jpg b/data/2025/2504_10xxx/2504.10861/images/01163041117b4702addfc2828f34282b7c07a400177ffc2748176f9968e4d482.jpg new file mode 100644 index 0000000000000000000000000000000000000000..810b21e4fb8239eca2c4082ae12f32e3088767e1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/01163041117b4702addfc2828f34282b7c07a400177ffc2748176f9968e4d482.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e89f30b97840926ed6094b39e9b63e2db7bf1ceb1487f9cfa00b6c45d6c7d197 +size 23225 diff --git a/data/2025/2504_10xxx/2504.10861/images/1615ec25e8c63ce70bbe3adc92df8285d9e22d8f3252708cf9a5a2e898598511.jpg b/data/2025/2504_10xxx/2504.10861/images/1615ec25e8c63ce70bbe3adc92df8285d9e22d8f3252708cf9a5a2e898598511.jpg new file mode 100644 index 0000000000000000000000000000000000000000..195cb77b8fddd22eb0fb95105715e5ceeaf9418c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/1615ec25e8c63ce70bbe3adc92df8285d9e22d8f3252708cf9a5a2e898598511.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:936812d457ccedadcf61db631a1e9795e3aa33f9e934202cdcbdac426af46baf +size 32035 diff --git a/data/2025/2504_10xxx/2504.10861/images/26e2dc0222a124557de7ef89388bbab16efa7595101dd2575acfdafeb460f0ba.jpg b/data/2025/2504_10xxx/2504.10861/images/26e2dc0222a124557de7ef89388bbab16efa7595101dd2575acfdafeb460f0ba.jpg new file 
mode 100644 index 0000000000000000000000000000000000000000..2930857526a26871979e27a603f1dd25fe93f0d1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/26e2dc0222a124557de7ef89388bbab16efa7595101dd2575acfdafeb460f0ba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e7dc86bb7f00c6f292519d1585676514fb9d885845ea0c07c070a076765b708 +size 53038 diff --git a/data/2025/2504_10xxx/2504.10861/images/38a8653ec599447018cb8bf10426f6f449740837923b31257f25fb40d6962518.jpg b/data/2025/2504_10xxx/2504.10861/images/38a8653ec599447018cb8bf10426f6f449740837923b31257f25fb40d6962518.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e77059dc02a2ef19c3bd50bfa6c570a32642eca --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/38a8653ec599447018cb8bf10426f6f449740837923b31257f25fb40d6962518.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9be467f27a6d6e5583e47c17a34de353236c5d3207b39f50d536fa74a8c9e5bf +size 64488 diff --git a/data/2025/2504_10xxx/2504.10861/images/57dca823c8b7ae7cfba9806c2ff0b51209622a04e30dde754ec83a334852b024.jpg b/data/2025/2504_10xxx/2504.10861/images/57dca823c8b7ae7cfba9806c2ff0b51209622a04e30dde754ec83a334852b024.jpg new file mode 100644 index 0000000000000000000000000000000000000000..415f81c6a9ff0e4d2387162a772e939013992929 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/57dca823c8b7ae7cfba9806c2ff0b51209622a04e30dde754ec83a334852b024.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b81b98c01a1f0911e0cee5d99cc44bcfea9f196ab4b101c76474cd50f4f7fca8 +size 1068 diff --git a/data/2025/2504_10xxx/2504.10861/images/81b8047cfb6dc431ad8aec3f0c23b80473c05396747e736928043641fcddda80.jpg b/data/2025/2504_10xxx/2504.10861/images/81b8047cfb6dc431ad8aec3f0c23b80473c05396747e736928043641fcddda80.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c571c5a02b3a592bb68a445352c510208cd21e97 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10861/images/81b8047cfb6dc431ad8aec3f0c23b80473c05396747e736928043641fcddda80.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6dfb28b1733bec5e3f702c7664e1f2e513e25a7efc42d45084cab56f54e738d +size 20044 diff --git a/data/2025/2504_10xxx/2504.10861/images/949e3a4965caf39889b4cadbd454b66dcf6db9c4bb79bec0c563cbfa0d7df351.jpg b/data/2025/2504_10xxx/2504.10861/images/949e3a4965caf39889b4cadbd454b66dcf6db9c4bb79bec0c563cbfa0d7df351.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b546f2830c18902d1b8d094796005593c661a017 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/949e3a4965caf39889b4cadbd454b66dcf6db9c4bb79bec0c563cbfa0d7df351.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35d5abd067015b73379602206a1000a6b75bcefa3454a69a01d23064e36a8b6f +size 981 diff --git a/data/2025/2504_10xxx/2504.10861/images/9c0755d3a040f8844c31873f60f1ce98f75706719db24c2384f489c70c87a910.jpg b/data/2025/2504_10xxx/2504.10861/images/9c0755d3a040f8844c31873f60f1ce98f75706719db24c2384f489c70c87a910.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54e1a400025aff5f3ae00c086447d2b095544740 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/9c0755d3a040f8844c31873f60f1ce98f75706719db24c2384f489c70c87a910.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4916f6745d3d691caf03e24661a7dd6886cfd3443d435b56002fc5a87744755f +size 40762 diff --git a/data/2025/2504_10xxx/2504.10861/images/adc1effc9e55d3dbeb3ab620fb48b91360028f713edb853bede83ee8f1953340.jpg b/data/2025/2504_10xxx/2504.10861/images/adc1effc9e55d3dbeb3ab620fb48b91360028f713edb853bede83ee8f1953340.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b2c2099344b59ae8fc8869a85e160955556644b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/adc1effc9e55d3dbeb3ab620fb48b91360028f713edb853bede83ee8f1953340.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:f8f1900d0723b313c736f88455592ca8cce706b93e44dbf573f847547016ae2f +size 40968 diff --git a/data/2025/2504_10xxx/2504.10861/images/b489c9848f844e933df64a0bc9c22d644c9e8218729b4c01c657c8dd7c272c9f.jpg b/data/2025/2504_10xxx/2504.10861/images/b489c9848f844e933df64a0bc9c22d644c9e8218729b4c01c657c8dd7c272c9f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f24dba73bc2cca88af6e8e4cb0e85c6e44b5bddd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/b489c9848f844e933df64a0bc9c22d644c9e8218729b4c01c657c8dd7c272c9f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84c4d574b0b70ef0a30ba9daa3ff83ee63cb6903f890e73ba4ddbd5d059387a5 +size 860 diff --git a/data/2025/2504_10xxx/2504.10861/images/b5df53f86fb06f99e0f4351fefeea2efe4c0a077e930e6c8769c098085df09e0.jpg b/data/2025/2504_10xxx/2504.10861/images/b5df53f86fb06f99e0f4351fefeea2efe4c0a077e930e6c8769c098085df09e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b3d27262ae23affd1f77ae48da098790de1715a8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/b5df53f86fb06f99e0f4351fefeea2efe4c0a077e930e6c8769c098085df09e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7192a26be75bcbc2ab70291a908fbb32e386c3399d3237385102999bea35cfba +size 77137 diff --git a/data/2025/2504_10xxx/2504.10861/images/b8d80b8b7ef2cc0d21f8dccb37dcca884443d727a60e15f37512e39fd2a5eff8.jpg b/data/2025/2504_10xxx/2504.10861/images/b8d80b8b7ef2cc0d21f8dccb37dcca884443d727a60e15f37512e39fd2a5eff8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d6651dcd83c533a2dd634852c9951afeff3b3e07 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/b8d80b8b7ef2cc0d21f8dccb37dcca884443d727a60e15f37512e39fd2a5eff8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfada965bab4e950f7da2b9ab57a32ab843983942841e23afd0876caa616294d +size 65747 diff --git 
a/data/2025/2504_10xxx/2504.10861/images/c9a8ba26e2c2e5a83296e69aea82c8dcfe28e652909eb1832eb214e2af5358ec.jpg b/data/2025/2504_10xxx/2504.10861/images/c9a8ba26e2c2e5a83296e69aea82c8dcfe28e652909eb1832eb214e2af5358ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6310c0491260bad292f4872422bca839c8dc03a0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/c9a8ba26e2c2e5a83296e69aea82c8dcfe28e652909eb1832eb214e2af5358ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:428fe280786e0d56c773e81401cb4787a0da64ebb5c50837d5753c0daeef05fc +size 241372 diff --git a/data/2025/2504_10xxx/2504.10861/images/ce3b077c2ebdcf5f7466cd6d35673d3f0ab2172dcdb52e8846aace40f300e321.jpg b/data/2025/2504_10xxx/2504.10861/images/ce3b077c2ebdcf5f7466cd6d35673d3f0ab2172dcdb52e8846aace40f300e321.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c558bf35b057ef0fc7ef029406d1e5d65df4a22 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/ce3b077c2ebdcf5f7466cd6d35673d3f0ab2172dcdb52e8846aace40f300e321.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a17800807eaece9148514cca5f10bb0cf73ccb6e4c0f10939b16616600307b12 +size 68428 diff --git a/data/2025/2504_10xxx/2504.10861/images/daf247f1ede05b107084c0cddaa32843207f0c0fac484c5bea2ddc5880c2935f.jpg b/data/2025/2504_10xxx/2504.10861/images/daf247f1ede05b107084c0cddaa32843207f0c0fac484c5bea2ddc5880c2935f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..90bd4539214280d469aa4e79dbd121a5fded66ce --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/daf247f1ede05b107084c0cddaa32843207f0c0fac484c5bea2ddc5880c2935f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ece19b19434a8b00643e5707508879ae75cc5d25f6448875cd873f701859249 +size 18458 diff --git a/data/2025/2504_10xxx/2504.10861/images/dbabd07881d038bbba4ce14a7b5fd344bdba0571ccbedb609f2101aa9a734980.jpg 
b/data/2025/2504_10xxx/2504.10861/images/dbabd07881d038bbba4ce14a7b5fd344bdba0571ccbedb609f2101aa9a734980.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ae2887a77cd3dd14df52986bafeb6a644efd207d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/dbabd07881d038bbba4ce14a7b5fd344bdba0571ccbedb609f2101aa9a734980.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:457e83137cc8a136557b4e71ef8ac3e9f04f7c25a7e9e1e08f20a6cbc51f19ff +size 1100 diff --git a/data/2025/2504_10xxx/2504.10861/images/ec626c759b741cd79197869ce8ff27fd318689aae8160e428396774589d3982f.jpg b/data/2025/2504_10xxx/2504.10861/images/ec626c759b741cd79197869ce8ff27fd318689aae8160e428396774589d3982f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df0cfd042ce5fe7084a85dada60618580c664663 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/images/ec626c759b741cd79197869ce8ff27fd318689aae8160e428396774589d3982f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae177e3fca25b82a7fe79d31da8e72d27dfbd845ab5a175a23f55f4a0e17c57e +size 38441 diff --git a/data/2025/2504_10xxx/2504.10861/layout.json b/data/2025/2504_10xxx/2504.10861/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a8eff57974a8b3f879564fb7ba01304172848461 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10861/layout.json @@ -0,0 +1,6678 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 79, + 79, + 514, + 97 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 79, + 514, + 97 + ], + "spans": [ + { + "bbox": [ + 79, + 79, + 514, + 97 + ], + "type": "inline_equation", + "content": "\\diamond" + }, + { + "bbox": [ + 79, + 79, + 514, + 97 + ], + "type": "text", + "content": " Ai2 Scholar QA: Organized Literature Synthesis with Attribution" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 71, + 110, + 514, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 
110, + 514, + 166 + ], + "spans": [ + { + "bbox": [ + 71, + 110, + 514, + 166 + ], + "type": "text", + "content": "Amanpreet Singh* Joseph Chee Chang* Chloe Anastasiades* Dany Haddad* Aakanksha Naik Amber Tanaka Angele Zamarron Cecile Nguyen Jena D. Hwang Jason Dunkleberger Matt Latzke Smita Rao Jaron Lochner Rob Evans Rodney Kinney Daniel S. Weld Doug Downey* Sergey Feldman*" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 245, + 169, + 348, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 245, + 169, + 348, + 181 + ], + "spans": [ + { + "bbox": [ + 245, + 169, + 348, + 181 + ], + "type": "text", + "content": "Allen Institute for AI" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 224, + 185, + 369, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 224, + 185, + 369, + 196 + ], + "spans": [ + { + "bbox": [ + 224, + 185, + 369, + 196 + ], + "type": "text", + "content": "{amanpreets, sergey}@allenai.org" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "spans": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 84, + 241, + 274, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 241, + 274, + 421 + ], + "spans": [ + { + "bbox": [ + 84, + 241, + 274, + 421 + ], + "type": "text", + "content": "Retrieval-augmented generation is increasingly effective in answering scientific questions from literature, but many state-of-the-art systems are expensive and closed-source. We introduce Ai2 Scholar QA, a free online scientific question answering application. To facilitate research, we make our entire pipeline public: as a customizable open-source Python package1 and interactive web app, along with paper indexes accessible through public APIs and downloadable datasets. 
We describe our system in detail and present experiments analyzing its key design decisions. In an evaluation on a recent scientific QA benchmark, we find that Ai2 Scholar QA outperforms competing systems." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 91, + 423, + 105, + 436 + ], + "blocks": [ + { + "bbox": [ + 91, + 423, + 105, + 436 + ], + "lines": [ + { + "bbox": [ + 91, + 423, + 105, + 436 + ], + "spans": [ + { + "bbox": [ + 91, + 423, + 105, + 436 + ], + "type": "image", + "image_path": "57dca823c8b7ae7cfba9806c2ff0b51209622a04e30dde754ec83a334852b024.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 91, + 437, + 104, + 449 + ], + "blocks": [ + { + "bbox": [ + 91, + 437, + 104, + 449 + ], + "lines": [ + { + "bbox": [ + 91, + 437, + 104, + 449 + ], + "spans": [ + { + "bbox": [ + 91, + 437, + 104, + 449 + ], + "type": "image", + "image_path": "dbabd07881d038bbba4ce14a7b5fd344bdba0571ccbedb609f2101aa9a734980.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 92, + 451, + 104, + 461 + ], + "blocks": [ + { + "bbox": [ + 92, + 451, + 104, + 461 + ], + "lines": [ + { + "bbox": [ + 92, + 451, + 104, + 461 + ], + "spans": [ + { + "bbox": [ + 92, + 451, + 104, + 461 + ], + "type": "image", + "image_path": "b489c9848f844e933df64a0bc9c22d644c9e8218729b4c01c657c8dd7c272c9f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 92, + 463, + 104, + 473 + ], + "blocks": [ + { + "bbox": [ + 92, + 463, + 104, + 473 + ], + "lines": [ + { + "bbox": [ + 92, + 463, + 104, + 473 + ], + "spans": [ + { + "bbox": [ + 92, + 463, + 104, + 473 + ], + "type": "image", + "image_path": "949e3a4965caf39889b4cadbd454b66dcf6db9c4bb79bec0c563cbfa0d7df351.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 428, + 229, + 475 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 115, + 428, + 166, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 428, + 166, + 438 + ], + "spans": [ + { + "bbox": [ + 115, + 428, + 166, + 438 + ], + "type": "text", + "content": "qa.allen.ai" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 439, + 229, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 439, + 229, + 449 + ], + "spans": [ + { + "bbox": [ + 115, + 439, + 229, + 449 + ], + "type": "text", + "content": "allenai/ai2-scholarqa-lib" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 452, + 162, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 452, + 162, + 461 + ], + "spans": [ + { + "bbox": [ + 115, + 452, + 162, + 461 + ], + "type": "text", + "content": "Demo Video" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 115, + 464, + 180, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 464, + 180, + 475 + ], + "spans": [ + { + "bbox": [ + 115, + 464, + 180, + 475 + ], + "type": "text", + "content": "Python Package" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 482, + 154, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 482, + 154, + 495 + ], + "spans": [ + { + "bbox": [ + 68, + 482, + 154, + 495 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 503, + 290, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 503, + 290, + 611 + ], + "spans": [ + { + "bbox": [ + 67, + 503, + 290, + 611 + ], + "type": "text", + "content": "Long-form scientific question answering systems use retrieval-augmented generation (RAG) (Lewis et al., 2020) over scientific literature to answer complex 
questions. These systems produce responses that bring together relevant insights from dozens of papers to help users rapidly learn about a body of scientific work. Examples are OpenScholar (Asai et al., 2024), Elicit, Consensus, and others §5." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 67, + 612, + 289, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 612, + 289, + 719 + ], + "spans": [ + { + "bbox": [ + 67, + 612, + 289, + 719 + ], + "type": "text", + "content": "Most of these systems are expensive to use and closed source, relying on models, workflows, and retrieval solutions not shared publicly. These issues create barriers for researchers who wish to study or build on the work. In response, we introduce Ai2 Scholar QA, a free-to-use scientific QA system (qa.allen.ai), and share our key components as open source software and public APIs." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 719, + 290, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 719, + 290, + 747 + ], + "spans": [ + { + "bbox": [ + 67, + 719, + 290, + 747 + ], + "type": "text", + "content": "Scholar QA follows a multi-stage pipeline (Figure 1) that starts by querying paper indexes: one" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 220, + 526, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 220, + 526, + 422 + ], + "spans": [ + { + "bbox": [ + 302, + 220, + 526, + 422 + ], + "type": "text", + "content": "from Semantic Scholar with over 100M abstracts, and a new index that we introduce in this work containing 11.7M full-text scientific papers. The pipeline then re-ranks the retrieved passages with a cross-encoder, and finally prompts a Large Language Model (LLM) to filter, cluster, and synthesize the passages into an answer. The final answer is presented to the user in a report with expandable sections of prose, bulleted lists, and tables. 
Claims in the answer are supported by citations, which can be clicked to reveal the cited paper's title and authors (with links to their corresponding Semantic Scholar pages), and in many cases relevant excerpt(s) from the paper, allowing for quick verification of the claim." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 423, + 526, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 423, + 526, + 586 + ], + "spans": [ + { + "bbox": [ + 302, + 423, + 526, + 586 + ], + "type": "text", + "content": "The system is based on open source code, enabling the community to reproduce and build on it. We release the code for our pipeline, prompting workflow and Web application. The retrieval indexes, including the new full text search index, are available as Semantic Scholar APIs and dataset downloads, and are continually updated with new articles (Kinney et al., 2023). Together, these resources can be combined with any generative LLM API to power a complete long-form scientific QA application. Our production system currently uses Anthropic's Claude 3.7 (Anthropic, 2024)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 302, + 586, + 525, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 586, + 525, + 774 + ], + "spans": [ + { + "bbox": [ + 302, + 586, + 525, + 774 + ], + "type": "text", + "content": "We present analyses that justify key design decisions in our architecture in §4. Our choice of retrieval models and configuration is informed by evaluation over a collection of real and synthetic user queries and accompanying passages judged for relevance by a LLM, both of which we release publicly. We compare Scholar QA's answers against several baselines, demonstrating that it achieves state-of-the-art performance on the ScholarQA-CS benchmark (Asai et al., 2024). Finally, we discuss the reception of Scholar QA by users. 
The strong majority " + }, + { + "bbox": [ + 302, + 586, + 525, + 774 + ], + "type": "inline_equation", + "content": "(85\\%)" + }, + { + "bbox": [ + 302, + 586, + 525, + 774 + ], + "type": "text", + "content": " of user feedback is positive, and the reported issues suggest important improvements for future work." + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 13, + 248, + 36, + 592 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 13, + 248, + 36, + 592 + ], + "spans": [ + { + "bbox": [ + 13, + 248, + 36, + 592 + ], + "type": "text", + "content": "arXiv:2504.10861v2 [cs.CL] 28 Jul 2025" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 81, + 752, + 155, + 761 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 752, + 155, + 761 + ], + "spans": [ + { + "bbox": [ + 81, + 752, + 155, + 761 + ], + "type": "text", + "content": "* Core contributors" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 81, + 762, + 220, + 773 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 762, + 220, + 773 + ], + "spans": [ + { + "bbox": [ + 81, + 762, + 220, + 773 + ], + "type": "text", + "content": "1We use closed state-of-the-art LLMs." 
+ } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 68, + 526, + 169 + ], + "blocks": [ + { + "bbox": [ + 69, + 68, + 526, + 169 + ], + "lines": [ + { + "bbox": [ + 69, + 68, + 526, + 169 + ], + "spans": [ + { + "bbox": [ + 69, + 68, + 526, + 169 + ], + "type": "image", + "image_path": "b5df53f86fb06f99e0f4351fefeea2efe4c0a077e930e6c8769c098085df09e0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 213, + 174, + 381, + 188 + ], + "lines": [ + { + "bbox": [ + 213, + 174, + 381, + 188 + ], + "spans": [ + { + "bbox": [ + 213, + 174, + 381, + 188 + ], + "type": "text", + "content": "Figure 1: Scholar QA Pipeline Overview" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 194, + 131, + 208 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 194, + 131, + 208 + ], + "spans": [ + { + "bbox": [ + 67, + 194, + 131, + 208 + ], + "type": "text", + "content": "2 Pipeline" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 211, + 291, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 211, + 291, + 320 + ], + "spans": [ + { + "bbox": [ + 67, + 211, + 291, + 320 + ], + "type": "text", + "content": "The Scholar QA architecture (Figure 1) has three primary components: 1) retrieval to identify relevant passages from a corpus of scientific literature; 2) a neural cross-encoder that re-ranks the passages to select the most relevant top-k; and 3) multi-step LLM generation to process the passages into a comprehensive report. Next, we describe each component of the pipeline in detail." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 322, + 291, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 322, + 291, + 377 + ], + "spans": [ + { + "bbox": [ + 67, + 322, + 291, + 377 + ], + "type": "text", + "content": "Query Validation. Prior to processing a query, we employ OpenAI's omni-moderation-latest" + }, + { + "bbox": [ + 67, + 322, + 291, + 377 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 67, + 322, + 291, + 377 + ], + "type": "text", + "content": " model for safeguarding against potentially harmful content and return appropriate error messages." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 385, + 139, + 397 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 385, + 139, + 397 + ], + "spans": [ + { + "bbox": [ + 67, + 385, + 139, + 397 + ], + "type": "text", + "content": "2.1 Retrieval" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 402, + 291, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 402, + 291, + 511 + ], + "spans": [ + { + "bbox": [ + 67, + 402, + 291, + 511 + ], + "type": "text", + "content": "We use the Semantic Scholar API (Kinney et al., 2023) for retrieval, specifically its endpoint for keyword search over paper abstracts, and our new endpoint for querying snippets from open-access papers. A query decomposer re-formulates the user query for each endpoint and retrieves up to 256 snippets and 20 abstracts. These texts are referred to as \"passages\" below." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 513, + 291, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 513, + 291, + 650 + ], + "spans": [ + { + "bbox": [ + 67, + 513, + 291, + 650 + ], + "type": "text", + "content": "Query Decomposer. 
The two retrieval endpoints differ in their effective query formats (one targets keyword and the other semantic queries) and filtering of results based on the user's preferences for paper metadata (paper year, venue, field of study). In our query decomposition step, an LLM is prompted to re-format the user query into paraphrases appropriate for each endpoint, and to extract the user's requested settings for the metadata filters. We use the outputs of this step for retrieval." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 651, + 292, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 651, + 292, + 747 + ], + "spans": [ + { + "bbox": [ + 67, + 651, + 292, + 747 + ], + "type": "text", + "content": "Search APIs. The Semantic Scholar keyword search API is described in Kinney et al. (2023). We introduce a new /snippet/search endpoint, which searches over a corpus of passages extracted from S2ORC (Lo et al., 2020), loaded into a Vespa cluster with papers and passages. Papers include metadata for filtering. Passages are derived from a pa" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 194, + 526, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 194, + 526, + 248 + ], + "spans": [ + { + "bbox": [ + 302, + 194, + 526, + 248 + ], + "type": "text", + "content": "per's title, abstract, or body and can be filtered at the paper level. The index includes 11.7M full-text papers across the fields of study listed here, and a total of 285.6M passages." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 249, + 526, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 249, + 526, + 370 + ], + "spans": [ + { + "bbox": [ + 302, + 249, + 526, + 370 + ], + "type": "text", + "content": "Each passage is limited to 480 tokens and truncated at sentence and section boundaries where possible, having an overlap of one sentence (up to 64 tokens) with the preceding and following passages. Passage text is embedded with mxbai-embed-large-v1 (Lee et al., 2024) with binary quantization, and placed into a dense (approximate nearest neighbor) index, as well as a traditional sparse keyword index." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 372, + 525, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 372, + 525, + 426 + ], + "spans": [ + { + "bbox": [ + 302, + 372, + 525, + 426 + ], + "type": "text", + "content": "We first retrieve a union of embedding and keyword-based matches, applying any specified filters. The filtered results are ranked with a weighted sum of embedding similarity and bm25 scores." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 438, + 381, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 438, + 381, + 451 + ], + "spans": [ + { + "bbox": [ + 302, + 438, + 381, + 451 + ], + "type": "text", + "content": "2.2 Reranking" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 456, + 525, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 456, + 525, + 579 + ], + "spans": [ + { + "bbox": [ + 302, + 456, + 525, + 579 + ], + "type": "text", + "content": "The passages obtained from the retrieval step are subsequently passed to a neural re-ranker and the top 50 results are retained. The re-ranker is a cross-encoder that encodes both the query and a candidate document simultaneously and outputs a relevance score used to rank the documents. 
We selected mxbai-erank-large-v1 (Shakir et al., 2024) based on the results in §4.2 and host it on Modal with a single NVIDIA L40S GPU." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 590, + 436, + 603 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 590, + 436, + 603 + ], + "spans": [ + { + "bbox": [ + 302, + 590, + 436, + 603 + ], + "type": "text", + "content": "2.3 Multi-step Generation" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 609, + 526, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 609, + 526, + 717 + ], + "spans": [ + { + "bbox": [ + 302, + 609, + 526, + 717 + ], + "type": "text", + "content": "The generation phase employs a three-step approach: first, the retrieved passages are processed to extract more precise quotes relevant to the query; second, the quotes are thematically clustered into separate sections appropriate for the answer; finally, a controlled generation process composes the final report one section at a time, synthesizing the quotes assigned to that section." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 721, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 721, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 721, + 525, + 775 + ], + "type": "text", + "content": "Quote extraction. Passages from the retrieval stage can be lengthy and may contain extraneous information not useful for answering the user query (Asai et al., 2023). 
The quote extraction stage aims" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 751, + 267, + 773 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 751, + 267, + 773 + ], + "spans": [ + { + "bbox": [ + 67, + 751, + 267, + 773 + ], + "type": "text", + "content": "2https://platform.openai.com/docs/guides/moderation" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 289, + 98 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 289, + 98 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 289, + 98 + ], + "type": "text", + "content": "to select only the most relevant quotes from the passages to improve the precision of the answer." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 100, + 291, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 100, + 291, + 250 + ], + "spans": [ + { + "bbox": [ + 67, + 100, + 291, + 250 + ], + "type": "text", + "content": "We instruct an LLM to extract verbatim quotes that directly contribute to answering the query (Slobodkin et al., 2024). As input to the extraction, we gather all passages from the re-ranker for a given paper, and concatenate these to the abstract of the paper. This aggregation helps create a richer context conducive to extracting relevant quotes. The LLM processes each paper's content independently and returns the selected quotes separated by ellipses. If the entire paper context is deemed irrelevant, it is discarded from further processing." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 253, + 291, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 253, + 291, + 376 + ], + "spans": [ + { + "bbox": [ + 67, + 253, + 291, + 376 + ], + "type": "text", + "content": "Answer Outline and Clustering. 
For generating a comprehensive research report, the effective organization of reference materials is essential for its overall coherence. We propose a thematic outline framework where the answer is divided into sections representing topics, and the reference quotes are assigned to these topics. This mapping allows the system to selectively focus only on the pertinent subset of quotes when synthesizing a section." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 378, + 291, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 378, + 291, + 594 + ], + "spans": [ + { + "bbox": [ + 67, + 378, + 291, + 594 + ], + "type": "text", + "content": "First, the LLM is instructed to generate a list of themes in logical order and the appropriate synthesis format for each theme, independent of the quotes from the previous step. The first section is always an introduction or background to provide the user the basics for understanding the answer. The format of each section can be either a paragraph or a bulleted list, serving different information needs. Paragraphs convey nuanced summaries from multiple papers, while bulleted lists enumerate related papers (e.g., models, datasets, or interactive systems). These list are also the catalyst for generating the comparison tables (see §2.3). Following this, the sections are assigned 0 or more quotes. In case no quote is assigned to a section, it is generated completely from the LLM weights." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 599, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 599, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 599, + 291, + 775 + ], + "type": "text", + "content": "Report Generation. With the answer outline in place, each section of the report is synthesized serially conditioned on the query, reference sources, and the sections prior to it. The LLM is also instructed to generate a TLDR for each section. 
The references are either the quotes assigned to the section or abstracts of papers that are cited within these quotes. This citation following method allows the LLM to condition on and cite foundational sources which are not uncovered in retrieval. The LLM is instructed to cite the sources for each claim in the generated section text and cite generations from its parameters as LLM Memory." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 71, + 526, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 329 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 329 + ], + "type": "text", + "content": "Paper Comparison Table Generation. Since bulleted list sections typically include closely related papers (e.g., different datasets), we additionally generate tables that compare and contrast all papers cited in that section using common aspects (e.g., size and annotation method). This pipeline is detailed in Newman et al. (2024). At a high level, the inputs are the query to Scholar QA, the section title, and the abstracts of all papers cited in the section. An LLM first produces a set of common aspects (columns) to compare papers (rows). Each cell (paper-aspect pair) is filled with a value using the full-text of the paper. Finally, as not all aspects are applicable to every paper (e.g., one paper might not be about a dataset), we filter out columns and rows with a high proportion of missing values. Figure 3 [A] shows an expanded table in Scholar QA where related papers from a section are compared across a set of common aspects ([B])." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 341, + 525, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 341, + 525, + 354 + ], + "spans": [ + { + "bbox": [ + 302, + 341, + 525, + 354 + ], + "type": "text", + "content": "3 Scholar QA: Interface and Source Code" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 364, + 526, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 364, + 526, + 486 + ], + "spans": [ + { + "bbox": [ + 302, + 364, + 526, + 486 + ], + "type": "text", + "content": "Scholar QA is open-sourced as an extensible Python package (ai2-scholar-qa) and a Typescript and React-based interactive web application. The LLM functionality of Scholar QA is implemented with litellm, which supports swapping a variety of models using your own keys. Thus, the community can build upon Scholar QA and easily visualize the results (examples in Appendix A). Below we describe the user experience of the demo.3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 499, + 527, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 499, + 527, + 662 + ], + "spans": [ + { + "bbox": [ + 302, + 499, + 527, + 662 + ], + "type": "text", + "content": "Progress and Section Streaming. High system latency can hinder usability. On average, Scholar QA produces a full report in 2.5 minutes (N=500, " + }, + { + "bbox": [ + 302, + 499, + 527, + 662 + ], + "type": "inline_equation", + "content": "\\sigma = 70\\mathrm{s}" + }, + { + "bbox": [ + 302, + 499, + 527, + 662 + ], + "type": "text", + "content": "), which is comparable to modern LLM-based research tools. To further improve usability, the following designs were used: 1) Displaying detailed real-time progress of the system (Nielsen, 1994) so users can examine the number of papers, passages, and sections being processed. 
2) Presenting each section as soon as it is generated, so users can begin browsing the first section in 50 seconds (N=500, " + }, + { + "bbox": [ + 302, + 499, + 527, + 662 + ], + "type": "inline_equation", + "content": "\\sigma = 24\\mathrm{s}" + }, + { + "bbox": [ + 302, + 499, + 527, + 662 + ], + "type": "text", + "content": ") post issuing a query (Appendix H)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 671, + 526, + 752 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 671, + 526, + 752 + ], + "spans": [ + { + "bbox": [ + 302, + 671, + 526, + 752 + ], + "type": "text", + "content": "Expandable Sections. By default, sections are collapsed showing only their titles, TLDR summaries, and number of cited sources. This gives users a gist of the information included in the report (Figure 2 [A]). Users can then click on the title of a section they wish to read to expand it ([B])." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 752, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 752, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 752, + 525, + 775 + ], + "type": "text", + "content": "3Our production system has a few additional features like downloadable reports, login and links to other Ai2 systems." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 91, + 68, + 501, + 435 + ], + "blocks": [ + { + "bbox": [ + 91, + 68, + 501, + 435 + ], + "lines": [ + { + "bbox": [ + 91, + 68, + 501, + 435 + ], + "spans": [ + { + "bbox": [ + 91, + 68, + 501, + 435 + ], + "type": "image", + "image_path": "c9a8ba26e2c2e5a83296e69aea82c8dcfe28e652909eb1832eb214e2af5358ec.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 442, + 525, + 467 + ], + "lines": [ + { + "bbox": [ + 67, + 442, + 525, + 467 + ], + "spans": [ + { + "bbox": [ + 67, + 442, + 525, + 467 + ], + "type": "text", + "content": "Figure 2: Multi-section [B] report generated by Scholar QA. References are linked to supporting excerpts [C]. Thumbs and free text feedback are collected for the full report [A], and also for each section and inline table." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 488, + 290, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 488, + 290, + 582 + ], + "spans": [ + { + "bbox": [ + 67, + 488, + 290, + 582 + ], + "type": "text", + "content": "References and Evidence Excerpts. To verify the claims in the report, users can click on the inline citations (Figure 2 [C]) or the pink excerpt icon in the inline table cells (Figure 3 [C]) to bring up a popup paper card. From the paper card, they can see the relevant excerpts used during the generation or click on the title to open the paper directly." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 586, + 291, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 586, + 291, + 626 + ], + "spans": [ + { + "bbox": [ + 67, + 586, + 291, + 626 + ], + "type": "text", + "content": "User Feedback Collection. 
We collect thumbs up/down or textual feedback for the whole report (Figure 2 [A]) and at each section and inline table." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 640, + 145, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 640, + 145, + 653 + ], + "spans": [ + { + "bbox": [ + 67, + 640, + 145, + 653 + ], + "type": "text", + "content": "4 Evaluation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 662, + 139, + 674 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 662, + 139, + 674 + ], + "spans": [ + { + "bbox": [ + 67, + 662, + 139, + 674 + ], + "type": "text", + "content": "4.1 Retrieval" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 681, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 681, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 681, + 291, + 775 + ], + "type": "text", + "content": "We tuned our retrieval setup by optimizing ranking over a dev set of 500 synthetic queries (see Appendix C) and the top 1000 passages for each based on GIST embedding distance (Solatorio, 2024). 
We generated binary relevance labels with gpt-4-turbo (see Appendix B for the prompt), which were found to have " + }, + { + "bbox": [ + 67, + 681, + 291, + 775 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 67, + 681, + 291, + 775 + ], + "type": "text", + "content": " agreement with" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 305, + 510, + 526, + 698 + ], + "blocks": [ + { + "bbox": [ + 305, + 510, + 526, + 698 + ], + "lines": [ + { + "bbox": [ + 305, + 510, + 526, + 698 + ], + "spans": [ + { + "bbox": [ + 305, + 510, + 526, + 698 + ], + "type": "image", + "image_path": "b8d80b8b7ef2cc0d21f8dccb37dcca884443d727a60e15f37512e39fd2a5eff8.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 718, + 526, + 755 + ], + "lines": [ + { + "bbox": [ + 302, + 718, + 526, + 755 + ], + "spans": [ + { + "bbox": [ + 302, + 718, + 526, + 755 + ], + "type": "text", + "content": "Figure 3: Inline tables compare papers [A] with common aspects [B] with values linked to supporting excerpts from the papers [C]." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 71, + 286, + 197 + ], + "blocks": [ + { + "bbox": [ + 70, + 71, + 286, + 197 + ], + "lines": [ + { + "bbox": [ + 70, + 71, + 286, + 197 + ], + "spans": [ + { + "bbox": [ + 70, + 71, + 286, + 197 + ], + "type": "image", + "image_path": "81b8047cfb6dc431ad8aec3f0c23b80473c05396747e736928043641fcddda80.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 210, + 291, + 282 + ], + "lines": [ + { + "bbox": [ + 67, + 210, + 291, + 282 + ], + "spans": [ + { + "bbox": [ + 67, + 210, + 291, + 282 + ], + "type": "text", + "content": "Figure 4: Embedding ranking performance for various compression methods and matryoshka cutoffs. The " + }, + { + "bbox": [ + 67, + 210, + 291, + 282 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 210, + 291, + 282 + ], + "type": "text", + "content": "-axis indicates the size of the vector index based relative to using int8 quantization and the full embedding size. The red circle indicates the selected configuration. Embedding size is notated next to each point." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 294, + 271, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 294, + 271, + 307 + ], + "spans": [ + { + "bbox": [ + 67, + 294, + 271, + 307 + ], + "type": "text", + "content": "human annotators on a sample of 100 queries." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 312, + 290, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 312, + 290, + 391 + ], + "spans": [ + { + "bbox": [ + 67, + 312, + 290, + 391 + ], + "type": "text", + "content": "Pipeline Tuning. 
We optimized several aspects of retrieval over this dev set: embedding model selection and quantization method for it, the components and weights in the final ensemble, and (when relevant) the target Matryoshka dimension for the embeddings (Kusupati et al., 2024)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 393, + 290, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 393, + 290, + 650 + ], + "spans": [ + { + "bbox": [ + 69, + 393, + 290, + 650 + ], + "type": "text", + "content": "We experimented with medium sized embedding models based on top performers on the retriever and ranking tasks of the MTEB (Muennighoff et al., 2022) leaderboard on HuggingFace. Table 4 in Appendix D lists our candidate models. The mxbai-embed-large-v1 (Lee et al., 2024) embeddings performed best over our dev set. Figure 4 validates our choice of quantization method and target Matryoshka dimension for these embeddings. We chose unary quantization with no Matryoshka truncation, (indicated by a red circle on the plot) since it satisfied our storage constraints without a large drop in performance. We experimented with assembling SparseEmbed (Kong et al., 2023), embedding cosine similarity, BM25, and chose the latter two (weight split of (0.6, 0.4) respectively) based on the results (See Appendix E). The BM25 scores are normalized with min-max scaling before computing the ensemble score." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 661, + 145, + 675 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 661, + 145, + 675 + ], + "spans": [ + { + "bbox": [ + 67, + 661, + 145, + 675 + ], + "type": "text", + "content": "4.2 Reranking" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 680, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 680, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 680, + 291, + 775 + ], + "type": "text", + "content": "We chose the re-ranker based on evaluation over a mixture of real scientific questions from the Stack Exchange Computer Science, Math, and Statistics communities, real research queries written by the authors and their colleagues, and synthetic ones generated by fine-tuning GPT-4o-mini over questions from the ScholarQA-CS dataset (Asai et al.," + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 304, + 68, + 528, + 172 + ], + "blocks": [ + { + "bbox": [ + 304, + 68, + 528, + 172 + ], + "lines": [ + { + "bbox": [ + 304, + 68, + 528, + 172 + ], + "spans": [ + { + "bbox": [ + 304, + 68, + 528, + 172 + ], + "type": "table", + "html": "
Model (Size)Latency (sec/query)nDCG @ 10mRR
bge-reranker-v2-m3 (568M)0.140.9130.973
akariasai/ranker_large (568M)0.140.9060.970
jina-reranker-v2-base (278M)0.060.9070.972
mxbai-rerank-large-v1 (435M)0.460.9270.975
mxbai-rerank-base-v1 (184M)0.190.9190.974
mxbai-rerank-xsmall-v1 (70M)0.110.9110.970
mxbai-rerank-base-v2 (0.5B)0.400.9180.974
mxbai-rerank-large-v2 (1.5B)0.700.9110.975
", + "image_path": "adc1effc9e55d3dbeb3ab620fb48b91360028f713edb853bede83ee8f1953340.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 181, + 524, + 206 + ], + "lines": [ + { + "bbox": [ + 302, + 181, + 524, + 206 + ], + "spans": [ + { + "bbox": [ + 302, + 181, + 524, + 206 + ], + "type": "text", + "content": "Table 1: Cross encoder re-ranker results on our dataset of GPT-4o labels. The best results are highlighted." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 302, + 216, + 525, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 216, + 525, + 366 + ], + "spans": [ + { + "bbox": [ + 302, + 216, + 525, + 366 + ], + "type": "text", + "content": "2024). For a given query, passages are retrieved and then awarded a relevance score in the range 0-3 with GPT-4o. We experiment with multiple state-of-the-art re-rankers (Chen et al., 2024; Shakir et al., 2024; Asai et al., 2024), and, as shown in Table 2, mxbai-erank-large-v1 gives the best results across the board (even outperforming its v2 model on our task). To reduce latency for deployment, we implemented optimizations like Pytorch model compilation. We release the evaluation data consisting of 2,426 queries and 225,618 passages." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 376, + 383, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 376, + 383, + 389 + ], + "spans": [ + { + "bbox": [ + 302, + 376, + 383, + 389 + ], + "type": "text", + "content": "4.3 Generation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 301, + 395, + 525, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 395, + 525, + 530 + ], + "spans": [ + { + "bbox": [ + 301, + 395, + 525, + 530 + ], + "type": "text", + "content": "We evaluate the final output of Scholar QA on the ScholarQA-CS dataset which consists of expert-annotated rubrics for 100 Computer Science research questions. The question-specific expert rubrics account for " + }, + { + "bbox": [ + 301, + 395, + 525, + 530 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 301, + 395, + 525, + 530 + ], + "type": "text", + "content": " of the final score, while the rest is computed based on global metrics of length, expertise and citations. We use GPT-4o (Hurst et al., 2024) as a judge with the utility provided by Asai et al. (2024) for automatic evaluation and compare against several baselines." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 531, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 531, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 531, + 525, + 775 + ], + "type": "text", + "content": "As shown in Table 2, our system outperforms popular LLMs: Llama 3.1 (Dubey et al., 2024), GPT 4.1 and Claude Sonnet 3.7 (Anthropic, 2024). It even outperforms reasoning models such as Sonnet 3.7 Thinking (Anthropic, 2025), o1-mini (OpenAI, 2024b) and o3-mini (Zhang et al., 2025) overall on the Scholar QA-CS benchmark. This setup lacks any retrieval so the models generate the responses completely from parametric memory. 
The benchmark rewards attribution and supporting evidence as a measure of trust in the system, so these models score lower overall. The reasoning based models perform better than our system on the rubrics score, which suggests that they may be superior backbones for our system. However, due to the additional reasoning tokens, these models are more expensive and also significantly increase latency." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 293, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 293, + 477 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 293, + 477 + ], + "type": "text", + "content": "For contemporary QA systems, we compare against OpenScholar with GPT-4o" + }, + { + "bbox": [ + 67, + 71, + 293, + 477 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 67, + 71, + 293, + 477 + ], + "type": "text", + "content": ", PaperQA2 (Skarlinski et al., 2024), Perplexity's Sonar Deep Research and STORM (Shao et al., 2024a). PaperQA2 did not release their retrieval corpus, so we substitute it with our retrieval pipeline for a fair comparison. Scholar QA obtains the best scores both on rubrics and overall, with the variant using Claude 3.7 Sonnet as the backbone scoring 2.4 points higher than STORM. For these QA systems, we also evaluate the attribution quality based on ALCE (Gao et al., 2023), which proposes entailment between claims and evidence to compute citation precision and recall. Again, we use GPT-4o as a judge to predict entailment (See Appendix F for the prompt) and treat each sentence in a response as a claim. Even with a report spanning multiple sections where all the sentences might not be cited, Scholar QA comes out far ahead of the other QA systems. 
Due to a lack of retrieval, this evaluation was not conducted when the LLMs are simply prompted to generate a response from memory. An interesting discovery from our analysis was that with an updated version of GPT-4o (i.e. gpt-4o-2024-11-20) as the judge, the scores are inflated compared to using gpt-4o-2024-08-06, even though the relative rankings are consistent (See Appendix J). For parity with Asai et al. (2023), we report the rubrics and citation scores with the older and newer model as the judge, respectively." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 479, + 291, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 479, + 291, + 694 + ], + "spans": [ + { + "bbox": [ + 69, + 479, + 291, + 694 + ], + "type": "text", + "content": "During our initial experiments, we restricted ScholarQA to only summarize the insights conditioned on the quotes extracted from retrieved passages. However, in cases where the retrieved passages were not relevant enough, the system failed to answer the question in favor of just discussing the information in the quotes. Moreover, for over " + }, + { + "bbox": [ + 69, + 479, + 291, + 694 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 69, + 479, + 291, + 694 + ], + "type": "text", + "content": " of instances in ScholarQA-CS, the rubrics require background information, even though the question might not. So, we updated our system LLM prompts to - a) Generate section text from memory if there is a lack of relevant retrieved passages and cite as LLM Memory and b) generate the first section as a background or introduction for the rest of the answer. The results reported here are obtained post these changes." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 696, + 291, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 696, + 291, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 696, + 291, + 723 + ], + "type": "text", + "content": "To finalize the backbone LLM for the production web application we conducted an anonymized pair" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 303, + 68, + 526, + 181 + ], + "blocks": [ + { + "bbox": [ + 303, + 68, + 526, + 181 + ], + "lines": [ + { + "bbox": [ + 303, + 68, + 526, + 181 + ], + "spans": [ + { + "bbox": [ + 303, + 68, + 526, + 181 + ], + "type": "table", + "html": "
ModelScoreModelScore
RubricsTotalRubricsTotalCite
LLM Prompt (No Retrieval)QA Systems
Llama 3.1-8B48.847.3SQA-Claude 3.7 S58.061.948.1
Llama 3.1-70B52.448.6SQA-Claude 3.5 S52.661.352.1
Claude 3.5 S50.446.6OS-GPT-4o49.353.525.9
Claude 3.7 S61.555.9PaperQA238.751.425.3
+Thinking62.755.7Perplex. Sonar DR38.752.825.2
GPT-4.163.256.2STORM54.259.540.2
o1-mini62.355.5
o3-mini60.650.2
", + "image_path": "9c0755d3a040f8844c31873f60f1ce98f75706719db24c2384f489c70c87a910.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 302, + 190, + 526, + 297 + ], + "lines": [ + { + "bbox": [ + 302, + 190, + 526, + 297 + ], + "spans": [ + { + "bbox": [ + 302, + 190, + 526, + 297 + ], + "type": "text", + "content": "Table 2: Evaluation results on ScholarQA-CS benchmark. System responses are either generated by simply prompting LLMs with the questions or by issuing the queries to RAG based QA systems. Expert annotated rubrics only scores are reported in addition to the overall total. The overall best results are highlighted and best results within a category are underlined. SQA: Ai2 Scholar QA, OS: Open Scholar, S: Sonnet, Claude 3.5 S: claude-3-5-sonnet-20241022." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 302, + 317, + 525, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 317, + 525, + 396 + ], + "spans": [ + { + "bbox": [ + 302, + 317, + 525, + 396 + ], + "type": "text", + "content": "wise comparison among the authors of this work. We compare Claude 3.7 against 3.5. Out of 18 comparisons, Claude 3.7 Sonnet was the overwhelming favorite with 17 wins, reinforcing our hypothesis that (with no other changes) our system improves with newer and better backbone LLMs." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 421, + 506, + 433 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 421, + 506, + 433 + ], + "spans": [ + { + "bbox": [ + 302, + 421, + 506, + 433 + ], + "type": "text", + "content": "4.4 Real-world Usage and User Feedback" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 301, + 446, + 526, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 446, + 526, + 635 + ], + "spans": [ + { + "bbox": [ + 301, + 446, + 526, + 635 + ], + "type": "text", + "content": "We have publicly deployed Scholar QA for 9 weeks, and received " + }, + { + "bbox": [ + 301, + 446, + 526, + 635 + ], + "type": "inline_equation", + "content": "30.2\\mathrm{k}" + }, + { + "bbox": [ + 301, + 446, + 526, + 635 + ], + "type": "text", + "content": " questions from 8,219 unique visitors. On average, each response is about " + }, + { + "bbox": [ + 301, + 446, + 526, + 635 + ], + "type": "inline_equation", + "content": "2.4\\mathrm{k}" + }, + { + "bbox": [ + 301, + 446, + 526, + 635 + ], + "type": "text", + "content": " words and costs $0.50 to produce. We observed 1,075 monthly repeated users who had issued queries on two distinct days over the course of a 30 day window. We analyze the user query types and the most prominent themes were deep-dive into specific research topics (15k) and comparative analysis of specific prior work (5k) (detailed distribution in Appendix I). A total of 2,433 thumbs feedback were submitted (Figure 2 [A]) and " + }, + { + "bbox": [ + 301, + 446, + 526, + 635 + ], + "type": "inline_equation", + "content": "85\\%" + }, + { + "bbox": [ + 301, + 446, + 526, + 635 + ], + "type": "text", + "content": " were positive. These suggests real-world users benefited from using Scholar QA." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 640, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 640, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 640, + 526, + 775 + ], + "type": "text", + "content": "For insight into the failure modes, we manually examined the 383 instances of neutral/negative freeform feedback. Table 3 lists the feedback types we identified along with their counts as of May 2025 (example feedback in Appendix G). We hypothesize that follow-up questions may help address insufficient answer detail and cases with a lack of retrieved documents, while improved retrieval may help address incomplete or incorrect references and off-topic responses." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 732, + 291, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 732, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 732, + 291, + 775 + ], + "type": "text", + "content": "Our results are not identical to Asai et al. (2024) due to variance across LLM-as-a-judge runs. Their reported total score for OS-GPT-4o is 57.7. We re-ran the evaluation in order to obtain rubrics only scores, which they did not report." + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 69, + 68, + 289, + 164 + ], + "blocks": [ + { + "bbox": [ + 69, + 68, + 289, + 164 + ], + "lines": [ + { + "bbox": [ + 69, + 68, + 289, + 164 + ], + "spans": [ + { + "bbox": [ + 69, + 68, + 289, + 164 + ], + "type": "table", + "html": "
CategoryCount
Incorrect or Missing References126
Off-topic or Misunderstood Query113
Request for More Detail or Specificity289
General Feedback on Quality149
Language or Format Issues78
", + "image_path": "1615ec25e8c63ce70bbe3adc92df8285d9e22d8f3252708cf9a5a2e898598511.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 94, + 172, + 263, + 185 + ], + "lines": [ + { + "bbox": [ + 94, + 172, + 263, + 185 + ], + "spans": [ + { + "bbox": [ + 94, + 172, + 263, + 185 + ], + "type": "text", + "content": "Table 3: Feedback Categories and Counts" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 68, + 197, + 161, + 210 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 197, + 161, + 210 + ], + "spans": [ + { + "bbox": [ + 68, + 197, + 161, + 210 + ], + "type": "text", + "content": "5 Related Work" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 222, + 291, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 222, + 291, + 560 + ], + "spans": [ + { + "bbox": [ + 69, + 222, + 291, + 560 + ], + "type": "text", + "content": "Scientific Question Answering. Answering scientific questions involves navigating scholarly sources and accurately retrieving and synthesizing them. Recently, OpenScholar (Asai et al., 2024) introduced a retrieval-augmented model designed explicitly for scientific literature synthesis with citation-supported responses with significant improvement in accuracy and reduced citation hallucination. Scholar QA extends its capabilities by leveraging the latest state-of-the-art LLMs and an open source generation pipeline that filters literature into precise quotes and produces thematically organized and detailed answers. STORM (Shao et al., 2024b) synthesizes comprehensive, Wikipedia-like articles, a distinct task from long-form scientific question answering. 
Other works have focused on literature review synthesis: LitLLM (Agarwal et al., 2024), which like Scholar QA uses a structured planning-and-generation pipeline similar, and SurveyForge (Yan et al., 2025), which outlines heuristics before generation. Their code was not available at the time of our evaluation. Zhou et al. (2025) present a survey categorizing AI-driven research support systems across various stages of the scientific process, including literature synthesis." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 571, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 571, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 571, + 291, + 775 + ], + "type": "text", + "content": "Commercial Tools for Scientific QA. Commercial RAG tools have emerged to facilitate research specifically tailored for scientific literature, such as Consensus (Consensus, 2024), which synthesizes findings from research papers, Scite (Scite, 2024), which evaluates claims by analyzing citation contexts, and Elicit (Elicit, 2024), which supports structured scientific literature reviews. Other general-purpose tools also support scientific inquiries: Perplexity (Perplexity, 2024), You.com (You.com, 2024), OpenAI Deep Research (OpenAI, 2024a) and Gemini Deep Research (DeepMind, 2024). Although these platforms leverage advanced retrieval and generation capabilities to facilitate literature reviews and deliver rapid insights," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 71, + 525, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 525, + 139 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 525, + 139 + ], + "type": "text", + "content": "they can be too expensive for widespread academic use and typically lack transparency regarding their pipelines. In contrast, Scholar QA is free with open sourced code and access to search APIs that enable the research community to build upon it." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 148, + 381, + 161 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 148, + 381, + 161 + ], + "spans": [ + { + "bbox": [ + 302, + 148, + 381, + 161 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 170, + 526, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 170, + 526, + 251 + ], + "spans": [ + { + "bbox": [ + 302, + 170, + 526, + 251 + ], + "type": "text", + "content": "We present Ai2 Scholar QA, a freely-available longform literature synthesis system that generates reports for complex scientific questions. We release key components as open source code and public APIs, and report experiments analyzing design decisions and demonstrate state-of-the-art results." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 261, + 365, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 261, + 365, + 274 + ], + "spans": [ + { + "bbox": [ + 303, + 261, + 365, + 274 + ], + "type": "text", + "content": "Limitations" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 283, + 525, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 283, + 525, + 337 + ], + "spans": [ + { + "bbox": [ + 302, + 283, + 525, + 337 + ], + "type": "text", + "content": "Supplementing the user feedback discussed in subsection 4.4, we would like to outline some limitations of our system and evaluation and our plans to mitigate them as part of fuvre work:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 309, + 346, + 525, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 346, + 525, + 535 + ], + "spans": [ + { + "bbox": [ + 309, + 346, + 525, + 535 + ], + "type": "text", + "content": "(i) Ai2 Scholar QA uses proprietary and closed-source LLM as the backbone for our production pipeline. 
As shown in Table 2, open source models lag behind the proprietary models in our evaluation. However, we are actively experimenting with open-sourced LLMs to replace the closed ones partially or completely in the pipeline. The open-sourced models will be specifically trained to do well on long-form scientific question answering and each of the sub-tasks in our multi-step generation. Further, our code is open-sourced and can easily be used with potentially any available LLM api provider supported by litellm." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 544, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 544, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 306, + 544, + 526, + 775 + ], + "type": "text", + "content": "(ii) We evaluate the answers generated by Scholar QA and compare against other systems on ScholarQA-CS dataset in subsection 4.3. Even though the answer rubrics are collected via human annotation, the evaluation is only limited to questions in the Computer Science domain and further relies completely on an LLM as the evaluator. In ongoing work, we are investigating more accurate benchmarks for evaluating long form scientific answers. Our approach uses real queries posed by users to Scholar QA, and human preference labels over answers from multiple systems in not just Computer Science, but Biomedicine and other scientific domains. These labels can serve as not only for evaluation, but also as training signals for models." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 166, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 166, + 85 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 166, + 85 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 92, + 292, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 92, + 292, + 227 + ], + "spans": [ + { + "bbox": [ + 67, + 92, + 292, + 227 + ], + "type": "text", + "content": "We would like to thank the anonymous reviewers for helpful comments, suggestions and feedback on the manuscript. We would also like to acknowledge the Ai2 ScholarQA users for providing constructive feedback that helped us improve the system. Finally, we thank David Albright for helping with the demo video, the Ai2 communications team for their help with user outreach, and Ai2 engineers and researchers for their help with user testing before launch." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 249, + 127, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 249, + 127, + 262 + ], + "spans": [ + { + "bbox": [ + 68, + 249, + 127, + 262 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 269, + 291, + 773 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 69, + 269, + 291, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 269, + 291, + 324 + ], + "spans": [ + { + "bbox": [ + 69, + 269, + 291, + 324 + ], + "type": "text", + "content": "Shubham Agarwal, Gaurav Sahu, Abhay Puri, Issam Hadj Laradji, Krishnamurthy Dj Dvijotham, Jason Stanley, Laurent Charlin, and Christopher Pal. 2024. Litllms, llms for literature review: Are we there yet?" 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 332, + 290, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 332, + 290, + 354 + ], + "spans": [ + { + "bbox": [ + 69, + 332, + 290, + 354 + ], + "type": "text", + "content": "Anthropic. 2024. The claude 3 model family: Opus, sonnet, haiku." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 362, + 267, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 362, + 267, + 375 + ], + "spans": [ + { + "bbox": [ + 69, + 362, + 267, + 375 + ], + "type": "text", + "content": "Anthropic. 2025. Claude 3.7 sonnet system card." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 382, + 291, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 382, + 291, + 470 + ], + "spans": [ + { + "bbox": [ + 69, + 382, + 291, + 470 + ], + "type": "text", + "content": "Akari Asai, Jacqueline He, Rulin Shao, Weijia Shi, Amanpreet Singh, Joseph Chee Chang, Kyle Lo, Luca Soldaini, Sergey Feldman, Mike D'Arcy, David Wadden, Matt Latzke, Minyang Tian, Pan Ji, Shengyan Liu, Hao Tong, Bohao Wu, Yanyu Xiong, Luke S. Zettlemoyer, and 6 others. 2024. Openscholar: Synthesizing scientific literature with retrieval-augmented lms. ArXiv, abs/2411.14199." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 478, + 290, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 478, + 290, + 523 + ], + "spans": [ + { + "bbox": [ + 69, + 478, + 290, + 523 + ], + "type": "text", + "content": "Akari Asai, Zeqiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. 2023. Self-rag: Learning to retrieve, generate, and critique through self-reflection. ArXiv, abs/2310.11511." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 531, + 290, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 531, + 290, + 587 + ], + "spans": [ + { + "bbox": [ + 69, + 531, + 290, + 587 + ], + "type": "text", + "content": "Jianlv Chen, Shitao Xiao, Peitian Zhang, Kun Luo, Defu Lian, and Zheng Liu. 2024. Bge m3-embedding: Multi-lingual, multi-functionality, multi-granularity text embeddings through self-knowledge distillation. Preprint, arXiv:2402.03216." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 594, + 290, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 594, + 290, + 616 + ], + "spans": [ + { + "bbox": [ + 69, + 594, + 290, + 616 + ], + "type": "text", + "content": "Consensus. 2024. Consensus - ai for research. Accessed: 2025-03-28." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 624, + 290, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 624, + 290, + 647 + ], + "spans": [ + { + "bbox": [ + 69, + 624, + 290, + 647 + ], + "type": "text", + "content": "Google DeepMind. 2024. Gemini - deep research mode. Accessed: 2025-03-28." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 655, + 290, + 743 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 655, + 290, + 743 + ], + "spans": [ + { + "bbox": [ + 69, + 655, + 290, + 743 + ], + "type": "text", + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, Anirudh Goyal, Anthony S. Hartshorn, Aobo Yang, Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, Arun Rao, Aston Zhang, and 510 others. 2024. The llama 3 herd of models. ArXiv, abs/2407.21783." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 751, + 290, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 751, + 290, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 751, + 290, + 773 + ], + "type": "text", + "content": "Elicit. 2024. Elicit - the ai research assistant. Accessed: 2025-03-28." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 526, + 774 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 304, + 72, + 525, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 525, + 117 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 525, + 117 + ], + "type": "text", + "content": "Tianyu Gao, Howard Yen, Jiatong Yu, and Danqi Chen. 2023. Enabling large language models to generate text with citations. In Conference on Empirical Methods in Natural Language Processing." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 123, + 526, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 123, + 526, + 211 + ], + "spans": [ + { + "bbox": [ + 304, + 123, + 526, + 211 + ], + "type": "text", + "content": "OpenAI Aaron Hurst, Adam Lerer, Adam P. Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, Aleksander Mkadry, Alex Baker-Whitcomb, Alex Beutel, Alex Borzunov, Alex Carney, Alex Chow, Alexander Kirillov, Alex Nichol, Alex Paino, and 397 others. 2024. Gpt-4o system card. ArXiv, abs/2410.21276." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 218, + 526, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 218, + 526, + 317 + ], + "spans": [ + { + "bbox": [ + 304, + 218, + 526, + 317 + ], + "type": "text", + "content": "Rodney Michael Kinney, Chloe Anastasiades, Russell Authur, Iz Beltagy, Jonathan Bragg, Alexandra Buraczynski, Isabel Cachola, Stefan Candra,oganand Chandrasekhar, Arman Cohen, Miles Crawford, Doug Downey, Jason Dunkelberger, Oren Etzioni, Rob Evans, Sergey Feldman, Joseph Gorney, David W. Graham, F.Q. Hu, and 29 others. 2023. The semantic scholar open data platform. *ArXiv*, abs/2301.10140." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 324, + 526, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 324, + 526, + 401 + ], + "spans": [ + { + "bbox": [ + 304, + 324, + 526, + 401 + ], + "type": "text", + "content": "Weize Kong, Jeffrey M. Dudek, Cheng Li, Mingyang Zhang, and Michael Bendersky. 2023. Sparseembed: Learning sparse lexical representations with contextual embeddings for retrieval. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, page 2399-2403. ACM." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 408, + 526, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 408, + 526, + 474 + ], + "spans": [ + { + "bbox": [ + 304, + 408, + 526, + 474 + ], + "type": "text", + "content": "Aditya Kusupati, Gantavya Bhatt, Aniket Rege, Matthew Wallingford, Aditya Sinha, Vivek Ramanujan, William Howard-Snyder, Kaifeng Chen, Sham Kakade, Prateek Jain, and Ali Farhadi. 2024. Matryoshka representation learning. Preprint, arXiv:2205.13147." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 481, + 525, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 481, + 525, + 515 + ], + "spans": [ + { + "bbox": [ + 304, + 481, + 525, + 515 + ], + "type": "text", + "content": "Sean Lee, Aamir Shakir, Darius Koenig, and Julius Lipp. 2024. Open source strikes bread - new fluffy embeddings model." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 522, + 526, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 526, + 587 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 526, + 587 + ], + "type": "text", + "content": "Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Kuttler, Mike Lewis, Wen tau Yih, Tim Rocktäschel, Sebastian Riedel, and Douwe Kiela. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. ArXiv, abs/2005.11401." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 594, + 526, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 526, + 650 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 526, + 650 + ], + "type": "text", + "content": "Kyle Lo, Lucy Lu Wang, Mark Neumann, Rodney Michael Kinney, and Daniel S. Weld. 2020. S2orc: The semantic scholar open research corpus. In Annual Meeting of the Association for Computational Linguistics." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 656, + 525, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 656, + 525, + 702 + ], + "spans": [ + { + "bbox": [ + 304, + 656, + 525, + 702 + ], + "type": "text", + "content": "Niklas Muennighoff, Nouamane Tazi, Loic Magne, and Nils Reimers. 2022. Mteb: Massive text embedding benchmark. In Conference of the European Chapter of the Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 708, + 526, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 708, + 526, + 774 + ], + "spans": [ + { + "bbox": [ + 304, + 708, + 526, + 774 + ], + "type": "text", + "content": "Benjamin Newman, Yoonjoo Lee, Aakanksha Naik, Pao Siangliulue, Raymond Fok, Juho Kim, Daniel S. Weld, Joseph Chee Chang, and Kyle Lo. 2024. Arxiv digestables: Synthesizing scientific literature into tables using language models. In Conference on Empirical Methods in Natural Language Processing." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 773 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 289, + 116 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 289, + 116 + ], + "type": "text", + "content": "Jakob Nielsen. 1994. Enhancing the explanatory power of usability heuristics. In Proceedings of the SIGCHI conference on Human Factors in Computing Systems, pages 152-158." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 126, + 289, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 126, + 289, + 147 + ], + "spans": [ + { + "bbox": [ + 69, + 126, + 289, + 147 + ], + "type": "text", + "content": "OpenAI. 2024a. Chatgpt - deep research mode. Accessed: 2025-03-28." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 158, + 231, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 158, + 231, + 169 + ], + "spans": [ + { + "bbox": [ + 69, + 158, + 231, + 169 + ], + "type": "text", + "content": "OpenAI. 2024b. Openai o1 system card." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 179, + 289, + 200 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 179, + 289, + 200 + ], + "spans": [ + { + "bbox": [ + 69, + 179, + 289, + 200 + ], + "type": "text", + "content": "Perplexity. 2024. Perplexity ai - ask anything. Accessed: 2025-03-28." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 211, + 289, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 211, + 289, + 265 + ], + "spans": [ + { + "bbox": [ + 69, + 211, + 289, + 265 + ], + "type": "text", + "content": "Nils Reimers and Iryna Gurevych. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 276, + 289, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 276, + 289, + 296 + ], + "spans": [ + { + "bbox": [ + 69, + 276, + 289, + 296 + ], + "type": "text", + "content": "Scite. 2024. Scite - smart citations for research. Accessed: 2025-03-28." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 308, + 289, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 308, + 289, + 340 + ], + "spans": [ + { + "bbox": [ + 69, + 308, + 289, + 340 + ], + "type": "text", + "content": "Aamir Shakir, Darius Koenig, Julius Lipp, and Sean Lee. 2024. Boost your search with the crispy mixedbread rerank models." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 350, + 289, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 350, + 289, + 449 + ], + "spans": [ + { + "bbox": [ + 69, + 350, + 289, + 449 + ], + "type": "text", + "content": "Yijia Shao, Yucheng Jiang, Theodore Kanell, Peter Xu, Omar Khattab, and Monica Lam. 2024a. 
Assisting in writing Wikipedia-like articles from scratch with large language models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 6252-6278, Mexico City, Mexico. Association for Computational Linguistics." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 460, + 289, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 460, + 289, + 513 + ], + "spans": [ + { + "bbox": [ + 69, + 460, + 289, + 513 + ], + "type": "text", + "content": "Yijia Shao, Yucheng Jiang, Theodore A. Kanell, Peter Xu, Omar Khattab, and Monica S. Lam. 2024b. Assisting in writing wikipedia-like articles from scratch with large language models. Preprint, arXiv:2402.14207." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 524, + 289, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 524, + 289, + 590 + ], + "spans": [ + { + "bbox": [ + 69, + 524, + 289, + 590 + ], + "type": "text", + "content": "Michael D. Skarlinski, Sam Cox, Jon M. Laurent, James D. Braza, Michaela M. Hinks, Michael J Hammerling, Manvitha Ponnapati, Samuel G. Rodriques, and Andrew D. White. 2024. Language agents achieve superhuman synthesis of scientific knowledge. ArXiv, abs/2409.13740." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 600, + 289, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 600, + 289, + 655 + ], + "spans": [ + { + "bbox": [ + 69, + 600, + 289, + 655 + ], + "type": "text", + "content": "Aviv Slobodkin, Eran Hirsch, Arie Cattan, Tal Schuster, and Ido Dagan. 2024. Attribute first, then generate: Locally-attributable grounded text generation. In Annual Meeting of the Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 665, + 289, + 698 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 665, + 289, + 698 + ], + "spans": [ + { + "bbox": [ + 69, + 665, + 289, + 698 + ], + "type": "text", + "content": "Aivin V. Solatorio. 2024. Gistembed: Guided in-sample selection of training negatives for text embedding fine-tuning. ArXiv, abs/2402.16829." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 708, + 289, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 708, + 289, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 708, + 289, + 773 + ], + "type": "text", + "content": "Saba Sturua, Isabelle Mohr, Mohammad Kalim Akram, Michael Gunther, Bo Wang, Markus Kimmel, Feng Wang, Georgios Mastrupas, Andreas Koukounas, Andreas Koukounas, Nan Wang, and Han Xiao. 2024. jina-embeddings-v3: Multilingual embeddings with task lora. Preprint, arXiv:2409.10173." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 524, + 382 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 304, + 72, + 524, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 524, + 126 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 524, + 126 + ], + "type": "text", + "content": "Liang Wang, Nan Yang, Xiaolong Huang, Binxing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, and Furu Wei. 2022. Text embeddings by weakly-supervised contrastive pre-training. arXiv preprint arXiv:2212.03533." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 136, + 524, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 136, + 524, + 190 + ], + "spans": [ + { + "bbox": [ + 304, + 136, + 524, + 190 + ], + "type": "text", + "content": "Xiangchao Yan, Shiyang Feng, Jiakang Yuan, Renqiu Xia, Bin Wang, Bo Zhang, and Lei Bai. 2025. 
Surveyforge: On the outline heuristics, memory-driven generation, and multi-dimensional evaluation for automated survey writing." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 200, + 524, + 221 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 200, + 524, + 221 + ], + "spans": [ + { + "bbox": [ + 304, + 200, + 524, + 221 + ], + "type": "text", + "content": "You.com. 2024. You.com - personalized ai search. Accessed: 2025-03-28." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 230, + 524, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 230, + 524, + 307 + ], + "spans": [ + { + "bbox": [ + 304, + 230, + 524, + 307 + ], + "type": "text", + "content": "Brian Zhang, Eric Mitchell, Hongyu Ren, Kevin Lu, Max Schwarzer, Michelle Pokrass, Shengjia Zhao, Ted Sanders, Adam Kalai, Alexandre Passos, Benjamin Sokolowsky, Elaine Ya Le, Erik Ritter, Hao Sheng, Hanson Wang, Ilya Kostrikov, James Lee, Johannes Ferstad, Michael Lampe, and 93 others. 2025. Openai o3-mini system card." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 316, + 524, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 524, + 382 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 524, + 382 + ], + "type": "text", + "content": "Zekun Zhou, Xiaocheng Feng, Lei Huang, Xiachong Feng, Ziyun Song, Ruihan Chen, Liang Zhao, Weitao Ma, Yuxuan Gu, Baoxin Wang, Dayong Wu, Guoping Hu, Ting Liu, and Bing Qin. 2025. From hypothesis to publication: A comprehensive survey of ai-driven research support systems." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 208, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 208, + 85 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 208, + 85 + ], + "type": "text", + "content": "A Python Package Usage" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 91, + 290, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 91, + 290, + 145 + ], + "spans": [ + { + "bbox": [ + 67, + 91, + 290, + 145 + ], + "type": "text", + "content": "Figure 5 shows a minimal example of running the system pipeline with the ai2-scholar-qa python package and how every component can be extended or modified as the users see fit." + } + ] + } + ], + "index": 1 + }, + { + "type": "code", + "bbox": [ + 70, + 151, + 290, + 343 + ], + "blocks": [ + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "lines": [ + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "spans": [ + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "text", + "content": "from scholarqa rag. reranker. reranker_base import CrossEncoderScores \nfrom scholarqa rag. retrieval import PaperFinderWithReranker \nfrom scholarqa rag. retriever_base import FullTextRetriever \nfrom scholarqa import ScholarQA \nCLAUSE_SONNET_3_7 = \"anthropic/clause-3-7-sonnet-20250219\" \n#Extends the scholarqa rag. retrieval.AAbstractRetriever class \nretriever " + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "text", + "content": " FullTextRetriever(n_retrieval=256, n_keyword_shrc=20) \n#Extends the scholarqa rag. reranker. 
reranker_base.AAbstractReranker class \nreranker " + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "text", + "content": " CrossEncoderScores(\"mixedbread-ai/mxbai-erank-large-v1\") \n#Wrapper class for retrieval \npaper_find " + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "text", + "content": " PaperFinderWithReranker(retriever, reranker, n_ rerank=50, context_threshold=0.5) \n#Scholar QA wrapper with the MultiStepQAPipeline integrated \nscholar_qa " + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "text", + "content": " ScholarQA(paper_find, llm_model " + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "inline_equation", + "content": "\\coloneqq" + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "text", + "content": " CLAUSEDSONNET_3_7) \nprint(scholar_qa answer_query(\"Which is the 9th planet in our solar system?\")) \n#Custom MultiStepQAPipeline class/steps \nfrom scholarqa rag. 
multi_step_qapipeline import MultiStepQAPipeline \nmqapipeline " + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "text", + "content": " MultiStepQAPipeline(llm_model " + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "inline_equation", + "content": "\\coloneqq" + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "text", + "content": " CLAUSEDSONNET_3_7) \npaperquotes " + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "text", + "content": " mqapipeline step_select Quotes(query,...)#Quote Extraction \nplan " + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "text", + "content": " mqapipeline step_clustering(query, paperquotes,...)#Outline and Clustering \n#Section Generation \nresponse " + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 70, + 151, + 290, + 343 + ], + "type": "text", + "content": " list(mqapipeline generate_iterations.summary(query, paperquotes, plan,...))" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 68, + 387, + 240, + 401 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 387, + 240, + 401 + ], + "spans": [ + { + "bbox": [ + 68, + 387, + 240, + 401 + ], + "type": "text", + "content": "B Document Relevance Prompt" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 407, + 291, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 407, + 291, + 447 + ], + "spans": [ + { + "bbox": [ + 67, + 407, + 291, + 447 + ], + "type": "text", + "content": "We used the 
following prompt to obtain binary relevance labels, which agreed with human annotators " + }, + { + "bbox": [ + 67, + 407, + 291, + 447 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 67, + 407, + 291, + 447 + ], + "type": "text", + "content": " of the time:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 89, + 451, + 270, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 451, + 270, + 512 + ], + "spans": [ + { + "bbox": [ + 89, + 451, + 270, + 512 + ], + "type": "text", + "content": "If any part of the following text is relevant to the following question, then return 1, otherwise return 0. Non-english results are not relevant, results which are primarily tables are not relevant." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 522, + 274, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 522, + 274, + 536 + ], + "spans": [ + { + "bbox": [ + 67, + 522, + 274, + 536 + ], + "type": "text", + "content": "C Retrieval Tuning Query Generation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 543, + 291, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 543, + 291, + 691 + ], + "spans": [ + { + "bbox": [ + 67, + 543, + 291, + 691 + ], + "type": "text", + "content": "Queries for the dev set were obtained from three internal sources of human research questions, and a set of LLM generations. We experimented with several methods for constructing the synthetic LLM questions. Our approach was to generate questions similar to those asked by real users by prompting the LLM to output: (1) a question based on paragraphs retrieved from the corpus, and (2) a \"more general\" version of the first question. We only use the \"more general\" set since they were more similar to real user queries." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 700, + 259, + 714 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 700, + 259, + 714 + ], + "spans": [ + { + "bbox": [ + 67, + 700, + 259, + 714 + ], + "type": "text", + "content": "D Embedding Models for Retrieval" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 720, + 291, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 720, + 291, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 720, + 291, + 774 + ], + "type": "text", + "content": "We experimented with multiple top embedding models from the MTEB leader board to optimize retrieval for our system. These are outlined in Table 4." + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 310, + 68, + 518, + 176 + ], + "blocks": [ + { + "bbox": [ + 310, + 68, + 518, + 176 + ], + "lines": [ + { + "bbox": [ + 310, + 68, + 518, + 176 + ], + "spans": [ + { + "bbox": [ + 310, + 68, + 518, + 176 + ], + "type": "table", + "html": "
HuggingFace embedding model name
Snowflake/snowflake-arctic-embed-m5
sentence-transformers/all-mpnet-base-v2 (Reimers and Gurevych, 2019)
avsolatorio/GIST-Embedding-v0 (Solatorio, 2024)
Snowflake/snowflake-arctic-embed-m-long6
intfloat/e5-base-v2 (Wang et al., 2022)
mixedbread-ai/mxbai-embed-large-v1 (Lee et al., 2024)
jinaai/jina-embeddings-v3 (Sturua et al., 2024)
", + "image_path": "26e2dc0222a124557de7ef89388bbab16efa7595101dd2575acfdafeb460f0ba.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 185, + 513, + 196 + ], + "lines": [ + { + "bbox": [ + 314, + 185, + 513, + 196 + ], + "spans": [ + { + "bbox": [ + 314, + 185, + 513, + 196 + ], + "type": "text", + "content": "Table 4: Embedding Models to optimize retrieval" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 302, + 216, + 495, + 231 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 216, + 495, + 231 + ], + "spans": [ + { + "bbox": [ + 302, + 216, + 495, + 231 + ], + "type": "text", + "content": "E Retrieval Ensemble Experiments" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 238, + 526, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 238, + 526, + 318 + ], + "spans": [ + { + "bbox": [ + 302, + 238, + 526, + 318 + ], + "type": "text", + "content": "Figure 6 shows results of our ensembling experiments for the full-text retrieval index. SparseEmbed introduces an overhead with minimal performance gains, so we picked an ensemble of embedding similarity and BM25 as our final ranking metric." 
+ } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 308, + 332, + 518, + 487 + ], + "blocks": [ + { + "bbox": [ + 91, + 352, + 266, + 364 + ], + "lines": [ + { + "bbox": [ + 91, + 352, + 266, + 364 + ], + "spans": [ + { + "bbox": [ + 91, + 352, + 266, + 364 + ], + "type": "text", + "content": "Figure 5: ai2-scholar-qa usage example" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 308, + 332, + 518, + 487 + ], + "lines": [ + { + "bbox": [ + 308, + 332, + 518, + 487 + ], + "spans": [ + { + "bbox": [ + 308, + 332, + 518, + 487 + ], + "type": "image", + "image_path": "01163041117b4702addfc2828f34282b7c07a400177ffc2748176f9968e4d482.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 501, + 525, + 550 + ], + "lines": [ + { + "bbox": [ + 302, + 501, + 525, + 550 + ], + "spans": [ + { + "bbox": [ + 302, + 501, + 525, + 550 + ], + "type": "text", + "content": "Figure 6: Ranking performance for various ensembles with relative size of the index required. Excluding SparseEmbed reduces the index size by " + }, + { + "bbox": [ + 302, + 501, + 525, + 550 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 302, + 501, + 525, + 550 + ], + "type": "text", + "content": " without a significant drop in ranking performance." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 575, + 502, + 589 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 575, + 502, + 589 + ], + "spans": [ + { + "bbox": [ + 302, + 575, + 502, + 589 + ], + "type": "text", + "content": "F Prompt for Evaluating Attribution" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 323, + 594, + 504, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 594, + 504, + 775 + ], + "spans": [ + { + "bbox": [ + 323, + 594, + 504, + 775 + ], + "type": "text", + "content": "As an Attribution Validator, your task is to verify whether a given reference can support the given claim. A claim can be either a plain sentence or a question followed by its answer. Specifically, your response should clearly indicate the relationship: Attributable, Contradictory or Extrapolatory. A contradictory error occurs when you can infer that the answer contradicts the fact presented in the context, while an extrapolatory error means that you cannot infer the correctness of the answer based on the information provided in the context. Output your response as a json with only a single key \"output\" and a value of one among (\"Attributable\", \"Contradictory\"," + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 90, + 73, + 191, + 104 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 73, + 191, + 104 + ], + "spans": [ + { + "bbox": [ + 90, + 73, + 191, + 104 + ], + "type": "text", + "content": "\"Extrapolatory\"). 
\nClaim: claim \nReference: ref_excerpt" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 115, + 221, + 130 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 115, + 221, + 130 + ], + "spans": [ + { + "bbox": [ + 68, + 115, + 221, + 130 + ], + "type": "text", + "content": "G User Feedback Examples" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 137, + 289, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 137, + 289, + 163 + ], + "spans": [ + { + "bbox": [ + 67, + 137, + 289, + 163 + ], + "type": "text", + "content": "Table 5 lists some examples of the user complaints for Scholar QA reports." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 69, + 173, + 296, + 318 + ], + "blocks": [ + { + "bbox": [ + 69, + 173, + 296, + 318 + ], + "lines": [ + { + "bbox": [ + 69, + 173, + 296, + 318 + ], + "spans": [ + { + "bbox": [ + 69, + 173, + 296, + 318 + ], + "type": "table", + "html": "
Feedback
The structure is good, but the articles you choose are not from top journals.
The first citation says that rabbits can obtain cholesterol from diet, not rats.
These provide a lot of general information about the topic, but nothing here actually addresses the central question I asked.
The answer did not address the ‘MOBILIZATION’ techniques at all! The answer is wrong because it addressed Exercise therapy!
They address the general setting, but not the specific question I asked.
It’s only analysing on SASAF model, but there are more.
", + "image_path": "38a8653ec599447018cb8bf10426f6f449740837923b31257f25fb40d6962518.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 362, + 287, + 377 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 362, + 287, + 377 + ], + "spans": [ + { + "bbox": [ + 67, + 362, + 287, + 377 + ], + "type": "text", + "content": "H Progress Updates and Report Sections" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 385, + 289, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 385, + 289, + 478 + ], + "spans": [ + { + "bbox": [ + 67, + 385, + 289, + 478 + ], + "type": "text", + "content": "Figure 7 demonstrates how we display in real-time the progress of the system during generation. This included number of papers and passages the were processed in each step, as well as the outline as it is being generated. Each section appears as soon as it is generated, so users can begin browsing the first sections." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 69, + 486, + 289, + 678 + ], + "blocks": [ + { + "bbox": [ + 69, + 486, + 289, + 678 + ], + "lines": [ + { + "bbox": [ + 69, + 486, + 289, + 678 + ], + "spans": [ + { + "bbox": [ + 69, + 486, + 289, + 678 + ], + "type": "image", + "image_path": "ce3b077c2ebdcf5f7466cd6d35673d3f0ab2172dcdb52e8846aace40f300e321.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 73, + 687, + 285, + 700 + ], + "lines": [ + { + "bbox": [ + 73, + 687, + 285, + 700 + ], + "spans": [ + { + "bbox": [ + 73, + 687, + 285, + 700 + ], + "type": "text", + "content": "Figure 7: Progress indication and section streaming." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 725, + 195, + 740 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 725, + 195, + 740 + ], + "spans": [ + { + "bbox": [ + 67, + 725, + 195, + 740 + ], + "type": "text", + "content": "I Query Type Analysis" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "type": "text", + "content": "To analyze the types of questions users are asking, we use an LLM to categorize the queries. The most" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 313, + 85, + 523, + 212 + ], + "blocks": [ + { + "bbox": [ + 313, + 85, + 523, + 212 + ], + "lines": [ + { + "bbox": [ + 313, + 85, + 523, + 212 + ], + "spans": [ + { + "bbox": [ + 313, + 85, + 523, + 212 + ], + "type": "image", + "image_path": "daf247f1ede05b107084c0cddaa32843207f0c0fac484c5bea2ddc5880c2935f.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 223, + 525, + 248 + ], + "lines": [ + { + "bbox": [ + 302, + 223, + 525, + 248 + ], + "spans": [ + { + "bbox": [ + 302, + 223, + 525, + 248 + ], + "type": "text", + "content": "Figure 8: Distribution of different question types submitted to Scholar QA deployed Web application." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 269, + 525, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 269, + 525, + 336 + ], + "spans": [ + { + "bbox": [ + 302, + 269, + 525, + 336 + ], + "type": "text", + "content": "prominent types were comprehensive deep-dive into a specific research topic (15k) and comparative analysis of prior work (5k). 
Other themes such as factoid QA or specific methods, datasets accounted for fewer queries." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 346, + 493, + 372 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 346, + 493, + 372 + ], + "spans": [ + { + "bbox": [ + 302, + 346, + 493, + 372 + ], + "type": "text", + "content": "J Generation Results with updated GPT-40" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 381, + 525, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 381, + 525, + 490 + ], + "spans": [ + { + "bbox": [ + 302, + 381, + 525, + 490 + ], + "type": "text", + "content": "Table 6 shows results on ScholarQA-CS with gpt-4o-2024-11-20 as the LLM judge. These results can be contrasted with the first two columns in Table 2 which are obtained with gpt-4o-2024-08-06 as the judge. Even though the absolute scores are inflated compared to Table 2, the relative rankings are about the same with Scholar QA getting the best overall score." + } + ] + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 310, + 499, + 518, + 613 + ], + "blocks": [ + { + "bbox": [ + 82, + 326, + 274, + 338 + ], + "lines": [ + { + "bbox": [ + 82, + 326, + 274, + 338 + ], + "spans": [ + { + "bbox": [ + 82, + 326, + 274, + 338 + ], + "type": "text", + "content": "Table 5: Example Feedback on Research Issues" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 499, + 518, + 613 + ], + "lines": [ + { + "bbox": [ + 310, + 499, + 518, + 613 + ], + "spans": [ + { + "bbox": [ + 310, + 499, + 518, + 613 + ], + "type": "table", + "html": "
ModelScoreModelScore
RubricsTotalRubricsTotal
LLM Prompting (No Retrieval)QA Systems
Llama 3.1-8B51.848.2SQA-Claude 3.7 S67.367.2
Llama 3.1-70B57.051.2SQA-Claude 3.5 S61.367.1
Claude 3.5 S57.851.3OS-GPT-4o54.959.9
Claude 3.7 S68.460.8PaperQA243.854.1
+Thinking68.358.7Perplex. Sonar DR43.956.0
GPT-4.169.361.8STORM59.264.7
o1-mini69.161.3
o3-mini68.555.9
", + "image_path": "ec626c759b741cd79197869ce8ff27fd318689aae8160e428396774589d3982f.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 620, + 526, + 740 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 620, + 526, + 740 + ], + "spans": [ + { + "bbox": [ + 302, + 620, + 526, + 740 + ], + "type": "text", + "content": "Table 6: Evaluation results on ScholarQA-CS benchmark with gpt-4o-2024-11-20 as the judge. System responses are either generated by simply prompting LLMs with the questions or by issuing the queries to RAG based QA systems. Expert annotated rubrics only scores are reported in addition to the overall total. The overall best results are highlighted and best results within a category are underlined. SQA: Ai2 Scholar QA, OS: Open Scholar, S: Sonnet, Claude 3.5 S: claude-3-5-sonnet-20241022." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_11xxx/2504.11491/0400dc9e-bb51-4dac-9ac6-e38f3b9731ae_content_list.json b/data/2025/2504_11xxx/2504.11491/0400dc9e-bb51-4dac-9ac6-e38f3b9731ae_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..5269cb2d37b17ab9209b74487fb36ff4d67dad6b --- /dev/null +++ b/data/2025/2504_11xxx/2504.11491/0400dc9e-bb51-4dac-9ac6-e38f3b9731ae_content_list.json @@ -0,0 +1,859 @@ +[ + { + "type": "text", + "text": "Attention GhostUNet++: Enhanced Segmentation of Adipose Tissue and Liver in CT Images", + "text_level": 1, + "bbox": [ + 84, + 87, + 911, + 137 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mansoor Hayat1, Supavadee Aramvith2, Subrata Bhattacharjee3 and Nouman Ahmad4", + "bbox": [ + 174, + 157, + 815, + 175 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract—Accurate segmentation of 
abdominal adipose tissue, including subcutaneous (SAT) and visceral adipose tissue (VAT), along with liver segmentation, is essential for understanding body composition and associated health risks such as type 2 diabetes and cardiovascular disease. This study proposes Attention GhostUNet++, a novel deep learning model incorporating Channel, Spatial, and Depth Attention mechanisms into the Ghost UNet++ bottleneck for automated, precise segmentation. Evaluated on the AATTCT-IDS and LiTS datasets, the model achieved Dice coefficients of 0.9430 for VAT, 0.9639 for SAT, and 0.9652 for liver segmentation, surpassing baseline models. Despite minor limitations in boundary detail segmentation, the proposed model significantly enhances feature refinement, contextual understanding, and computational efficiency, offering a robust solution for body composition analysis. The implementation of the proposed Attention GhostUNet++ model is available at: https://github.com/MansoorHayat777/Attention-GhostUNetPlusPlus.", + "bbox": [ + 81, + 202, + 488, + 443 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Clinical relevance—The Attention GhostUNet++ model offers a significant advancement in the automated segmentation of adipose tissue and liver regions from CT images. Accurate delineation of visceral and subcutaneous adipose tissue, alongside liver structures, is critical for clinicians managing cardiometabolic disorders, including type 2 diabetes and cardiovascular diseases. By reducing reliance on manual annotations, the model enhances efficiency and scalability, paving the way for its integration into routine clinical workflows and large-scale body composition studies.", + "bbox": [ + 81, + 454, + 488, + 583 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. 
INTRODUCTION", + "text_level": 1, + "bbox": [ + 218, + 595, + 352, + 608 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Obesity is a significant risk factor for cardiometabolic diseases, including type 2 diabetes (T2D), cardiovascular disease (CVD), non-alcoholic fatty liver disease, and hypertension [1] [2]. Body composition (BC) analysis focuses on the distribution of fatty and non-fatty tissues, particularly in depots such as adipose tissue, muscle, liver, and bone, playing a crucial role in predicting and preventing these diseases [3]. Among adipose tissue compartments, visceral adipose tissue (VAT) and subcutaneous adipose tissue (SAT) are key. VAT, located within the abdominal cavity [4], [5]. Similarly, SAT, located beneath the skin [3] [6], [7], [8], [17].", + "bbox": [ + 81, + 616, + 488, + 782 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This study introduces Attention GhostUNet++, an advanced deep learning model that incorporates Channel, Spatial, and Depth Attention mechanisms into the Ghost UNet++ bottleneck for automated segmentation of VAT, SAT, and liver regions from CT images. Using the AATTCT-IDS [10] and LiTS [11] datasets, the model achieved Dice coefficients of 0.9430 (VAT), 0.9639 (SAT), and 0.9652 (liver),", + "bbox": [ + 81, + 782, + 488, + 888 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This research is funded by Thailand Science Research and Innovation Fund Chulalongkorn University (IND_FF_68_280_2100_039).", + "bbox": [ + 81, + 901, + 488, + 926 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "outperforming baseline models in most metrics. 
However, challenges remain in SAT and VAT boundary segmentation, where Jaccard indices of 0.9639 (SAT) and 0.9430 (VAT) fell short of UNet's performance in specific cases.", + "bbox": [ + 504, + 200, + 911, + 261 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The proposed model reduces reliance on manual annotation, providing an efficient, scalable solution for BC analysis. It addresses challenges in large-scale imaging studies, paving the way for broader applications in multi-class segmentation and personalized healthcare, with future work focusing on boundary refinement and dataset generalization.", + "bbox": [ + 504, + 262, + 911, + 353 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "METHODOLOGY", + "text_level": 1, + "bbox": [ + 650, + 361, + 767, + 375 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ghost Module", + "text_level": 1, + "bbox": [ + 506, + 380, + 607, + 393 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The Ghost-UNet [9] architecture is designed to enhance computational efficiency by minimizing redundancy in feature maps. Traditional convolution operations are computationally intensive for generating high-dimensional feature maps. The Ghost module addresses this limitation by employing lightweight operations to generate ghost feature maps, which are lower-resolution representations of the input features. 
This significantly improves efficiency without sacrificing accuracy.", + "bbox": [ + 504, + 398, + 911, + 535 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The Ghost module is mathematically expressed as:", + "bbox": [ + 522, + 535, + 867, + 550 + ], + "page_idx": 0 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {o} = \\gamma (\\mathbf {W} * \\mathbf {F} _ {i}) + \\beta\n$$\n", + "text_format": "latex", + "bbox": [ + 640, + 558, + 774, + 575 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "where:", + "bbox": [ + 506, + 583, + 555, + 595 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $\\mathbf{F}_o$ : High-resolution output feature map,", + "- $\\mathbf{F}_i$ : Low-resolution ghost feature map,", + "- W: Weight tensor of the ghost convolution layer,", + "- $\\gamma$ and $\\beta$ : Learnable scale and shift parameters,", + "$\\star$ Ghost convolution operation." + ], + "bbox": [ + 522, + 599, + 870, + 675 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The ghost convolution operation is further defined as:", + "bbox": [ + 522, + 678, + 887, + 693 + ], + "page_idx": 0 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {i} [ m ] = \\sum_ {n} W [ n, m ] * \\mathbf {X} [ n ] + b\n$$\n", + "text_format": "latex", + "bbox": [ + 611, + 699, + 807, + 727 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "where $m$ represents the ghost channels, $n$ is the input feature map index, and $b$ denotes the bias term.", + "bbox": [ + 504, + 732, + 911, + 762 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Attention GhostUNet++ Architecture", + "text_level": 1, + "bbox": [ + 504, + 771, + 758, + 785 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In the proposed Attention GhostUNet++, Ghost bottleneck layers are integrated into the UNet++ architecture, each enhanced with Channel, Spatial, and Depth Attention mechanisms. 
These mechanisms improve feature refinement by dynamically emphasizing relevant regions in the feature maps while suppressing redundant information. The network consists of 15 bottleneck layers arranged within a nested architecture that supports robust contraction and expansion paths:", + "bbox": [ + 504, + 789, + 911, + 926 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.11491v1 [eess.IV] 14 Apr 2025", + "bbox": [ + 22, + 261, + 58, + 717 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5079be29d2c99a728fa0fdb107927d0284c0f855bf5a5d187f005a1314ab8877.jpg", + "image_caption": [ + "Fig. 1. Attention GhostUNet++ Architecture." + ], + "image_footnote": [], + "bbox": [ + 98, + 65, + 916, + 441 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Contraction Path: Extracts features at multiple resolutions.", + "- Expansion Path: Reconstructs feature maps for precise segmentation." + ], + "bbox": [ + 98, + 498, + 486, + 556 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Each bottleneck layer in the network can be represented as:", + "bbox": [ + 81, + 561, + 488, + 588 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {o} = \\mathcal {A} (\\mathcal {G} (\\mathbf {F} _ {i}, \\Theta))\n$$\n", + "text_format": "latex", + "bbox": [ + 220, + 590, + 351, + 607 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where:", + "text_level": 1, + "bbox": [ + 83, + 614, + 133, + 627 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $\\mathbf{F}_i$ : Input tensor,", + "- $\\mathbf{F}_o$ : Output tensor,", + "$\\mathcal{G}$ : Ghost bottleneck layer,", + "$\\mathcal{A}$ : Attention mechanism,", + "- $\\Theta$ : Set of learnable parameters." 
+ ], + "bbox": [ + 99, + 632, + 328, + 705 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "At each hierarchical level $l$ in the network, the feature maps are calculated as:", + "bbox": [ + 81, + 709, + 488, + 738 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {l} = \\mathcal {P} (\\mathcal {G} (\\mathbf {F} _ {l - 1})) + \\mathcal {U} (\\mathcal {G} (\\mathbf {F} _ {l + 1}))\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 746, + 401, + 763 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where:", + "text_level": 1, + "bbox": [ + 83, + 771, + 133, + 782 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$\\mathcal{P}$ : Pooling operation,", + "$\\mathcal{U}$ : Up-sampling operation,", + "$\\mathcal{G}$ : Ghost module." + ], + "bbox": [ + 99, + 787, + 303, + 830 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The final output of the network is:", + "bbox": [ + 99, + 835, + 336, + 849 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {\\text {f i n a l}} = \\mathbf {F} _ {1} + \\mathbf {F} _ {2} + \\dots + \\mathbf {F} _ {n}\n$$\n", + "text_format": "latex", + "bbox": [ + 196, + 858, + 375, + 875 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $n$ is the number of hierarchical levels in the architecture. This innovative integration of Ghost bottleneck layers with attention mechanisms enables Attention GhostUNet++", + "bbox": [ + 81, + 881, + 488, + 926 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "to achieve high accuracy, computational efficiency, and improved feature refinement, making it a robust solution for medical image segmentation.", + "bbox": [ + 504, + 498, + 913, + 544 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "II. 
EXPERIMENTAL RESULTS", + "text_level": 1, + "bbox": [ + 604, + 558, + 815, + 571 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Datasets", + "text_level": 1, + "bbox": [ + 506, + 580, + 570, + 593 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Datasets and Preprocessing", + "text_level": 1, + "bbox": [ + 506, + 601, + 697, + 616 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This study evaluates the proposed Attention GhostUNet++ model using two datasets: the Abdominal Adipose Tissue CT Image Dataset (AATTCT-IDS) [10] and the Liver Tumor Segmentation Benchmark (LiTS) [11]. These datasets provide annotated CT images for segmenting SAT, VAT, and liver regions.", + "bbox": [ + 504, + 622, + 913, + 712 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The AATTCT-IDS dataset [10] includes 13,732 CT slices (3,213 annotated) from 300 subjects, focusing on SAT and VAT. Challenges arise from individual variability and overlapping boundaries between these compartments. The LiTS dataset [11] contains 201 CT volumes annotated for liver regions, featuring diverse liver shapes and pathologies. Liver segmentation was prioritized in this study.", + "bbox": [ + 504, + 713, + 913, + 819 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Preprocessing included resizing CT slices to a consistent resolution, normalizing pixel intensities, and cropping volumes to focus on regions of interest. Data augmentation (e.g., rotations, flipping, scaling) was applied to enhance variability and reduce overfitting.", + "bbox": [ + 504, + 820, + 911, + 895 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "These datasets were critical for validating the robustness and generalizability of the Attention GhostUNet++ model", + "bbox": [ + 504, + 896, + 913, + 926 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/7b5c57a797698a60272f63df9d03e910ea3876702b26ae0df2950ec47105d257.jpg", + "image_caption": [ + "Fig. 2. 
Segmentation results for randomly selected CT image examples, the columns represent (1) the original CT image, (2) ground truth annotations, (3) model-predicted segmentation output, (4) mask differences between ground truth and predictions, (5) predicted segmentation masks overlaid on the original CT image." + ], + "image_footnote": [], + "bbox": [ + 89, + 61, + 910, + 373 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "in segmenting complex anatomical structures across varying scenarios.", + "bbox": [ + 81, + 453, + 488, + 482 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Training Settings", + "text_level": 1, + "bbox": [ + 83, + 493, + 202, + 508 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The proposed Attention GhostUNet++ model was implemented in PyTorch 2.0 and trained on an Nvidia 3090Ti GPU. Xavier initialization ensured effective weight scaling. The datasets were split into training, validation, and test sets (70:20:10), with data augmentation (rotations, flipping, scaling, and intensity variations) applied to improve generalization.", + "bbox": [ + 81, + 512, + 488, + 617 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Training used the Adam optimizer [16] $(1\\times 10^{-4}$ initial learning rate, cosine annealing decay) and a combined Dice and cross-entropy loss. Mini-batches of size 16 optimized GPU usage, while early stopping (100-epoch patience) prevented overfitting. 
The best-performing model was selected based on the validation Dice coefficient (DC).", + "bbox": [ + 81, + 618, + 488, + 708 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Segmentation accuracy was evaluated using the DC and Jaccard index (JI):", + "bbox": [ + 81, + 709, + 488, + 739 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\text {D i c e} = \\frac {2 | \\mathbf {P} \\cap \\mathbf {G} |}{| \\mathbf {P} | + | \\mathbf {G} |} = \\frac {2 \\sum_ {i} P _ {i} G _ {i}}{\\sum_ {i} P _ {i} + \\sum_ {i} G _ {i}}\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 744, + 436, + 779 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\text {J a c c a r d I n d e x} = \\frac {\\left| \\mathbf {P} \\cap \\mathbf {G} \\right|}{\\left| \\mathbf {P} \\cup \\mathbf {G} \\right|} = \\frac {\\sum_ {i} P _ {i} G _ {i}}{\\sum_ {i} P _ {i} + \\sum_ {i} G _ {i} - \\sum_ {i} P _ {i} G _ {i}}\n$$\n", + "text_format": "latex", + "bbox": [ + 117, + 782, + 452, + 816 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $P_{i}$ and $G_{i}$ represent the predicted and ground truth segmentation at pixel $i$ , respectively.", + "bbox": [ + 81, + 821, + 488, + 853 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Performance Evaluation", + "text_level": 1, + "bbox": [ + 83, + 862, + 250, + 876 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The performance of the proposed Attention GhostUNet++ model was compared against baseline models (UNet [12], UNet++ [13], ResUNet [14] and GhostUNet++", + "bbox": [ + 81, + 881, + 488, + 926 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "[15] for VAT, SAT, and liver. Evaluation metrics included the DC and JI, which assess segmentation accuracy and overlap, respectively.", + "bbox": [ + 504, + 453, + 911, + 498 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The proposed model outperformed the baseline models in most cases. 
As indicated in Table I, for VAT segmentation, it achieved a DC of 0.9430 and a JI of 0.9430, closely competing with UNet's JI of 0.9491. In SAT segmentation, it achieved a DC of 0.9639, matching UNet's JI of 0.9807 but indicating room for improvement in handling boundary details. For liver segmentation, Attention GhostUNet++ demonstrated superior performance with a Dice coefficient of 0.9652 and a JI of 0.9496, outperforming all baselines.", + "bbox": [ + 504, + 500, + 913, + 635 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/c874966e39c3429c376f6207375e6f36f90340ce39b6090ca28d05035063b76a.jpg", + "table_caption": [ + "TABLEI MEAN SEGMENTATION DICE AND JACCARD SCORES FOR DIFFERENT TARGETS IN AATTCT-IDS [10] AND LITS [11] DATASETS" + ], + "table_footnote": [], + "table_body": "
MethodMetricsVATSATLiver
UNet [12]Dice coefficient0.90570.96040.8746
Jaccard index0.94910.98070.8456
UNet++ [13]Dice coefficient0.87420.87410.9468
Jaccard index0.81570.86390.9311
ResUNet [14]Dice coefficient0.91840.94820.9587
Jaccard index0.90210.96530.9412
GhostUNet++ [15]Dice coefficient0.88470.89160.9554
Jaccard index0.79160.84510.9318
Attention GhostUNet++Dice coefficient0.94300.96390.9652
Jaccard index0.94300.96390.9496
", + "bbox": [ + 506, + 703, + 929, + 834 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "These results confirm the effectiveness of the Attention GhostUNet++ [15] model for medical image segmentation. While achieving state-of-the-art performance across most tasks, minor limitations in SAT segmentation suggest opportunities for further refinement to enhance boundary accuracy", + "bbox": [ + 504, + 851, + 913, + 926 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "and overall generalizability.", + "bbox": [ + 83, + 66, + 272, + 80 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fig. 2 showcases example segmentation outputs from the test dataset. Ground truth annotations are displayed alongside predictions from each model. The Attention GhostUNet++ [15] model exhibits superior boundary adherence and accurate region segmentation, particularly in challenging cases where SAT and VAT boundaries overlap or are difficult to distinguish. Additionally, for liver segmentation, the proposed model effectively captures the organ's irregular contours, delivering results that clearly outperform those of the baseline models.", + "bbox": [ + 81, + 80, + 488, + 231 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "These visual comparisons underscore the model's capability to handle complex segmentation tasks with enhanced precision and boundary accuracy, making it a robust solution for medical imaging applications.", + "bbox": [ + 81, + 232, + 488, + 294 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "III. CONCLUSION", + "text_level": 1, + "bbox": [ + 220, + 305, + 352, + 319 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We proposed Attention GhostUNet++, a novel deep learning architecture for segmenting SAT, VAT, and liver regions in CT images. 
By integrating Channel, Spatial, and Depth Attention mechanisms into Ghost-Net bottleneck layers, the model achieves enhanced feature refinement and contextual understanding with computational efficiency.", + "bbox": [ + 81, + 325, + 488, + 416 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Experiments on AATTCT-IDS[10] and LiTS[11] datasets demonstrated state-of-the-art performance, with DCs of 0.9430 (VAT), 0.9639 (SAT), and 0.9652 (liver). Visual comparisons highlighted the model's ability to accurately segment complex anatomical structures, reducing boundary errors and outperforming baseline models.", + "bbox": [ + 81, + 417, + 488, + 507 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This automated solution reduces reliance on manual annotations and enhances scalability for clinical and research applications. Future work will focus on addressing limitations, extending to multi-class tasks, and validating across diverse datasets, paving the way for efficient and accurate medical imaging tools.", + "bbox": [ + 81, + 508, + 488, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "LIMITATIONS AND FUTURE WORK", + "text_level": 1, + "bbox": [ + 163, + 611, + 408, + 625 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The proposed Attention GhostUNet++ model faces minor limitations in handling fine-grained boundaries, particularly for SAT segmentation, and its generalizability to diverse imaging modalities remains untested. It also requires further evaluation on datasets with significant variations in anatomical structures and imaging quality. Future work will focus on enhancing boundary detection, expanding validation to multi-class tasks and diverse datasets, and optimizing computational efficiency for clinical deployment. Additionally, integrating advanced techniques, such as edge-aware attention, could improve performance in challenging regions. 
Extending the model to segment complex pathological structures will further enhance its utility in diagnostic and therapeutic applications.", + "bbox": [ + 81, + 632, + 490, + 844 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 238, + 856, + 333, + 869 + ], + "page_idx": 3 + }, + { + "type": "ref_text", + "text": "[1] Mokdad, Ali H., Earl S. Ford, Barbara A. Bowman, William H. Dietz, Frank Vinicor, Virginia S. Bales, and James S. Marks. \"Prevalence of obesity, diabetes, and obesity-related health risk factors, 2001.\" Jama 289, no. 1 (2003): 76-79.", + "bbox": [ + 89, + 878, + 488, + 926 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[2] Kaess, Bernhard M., Jacek Jozwiak, Miroslaw Mastej, Witold Lukas, Wladyslaw Grzeseczak, Adam Windak, Wieslawa Piwowarska et al. \"Association between anthropometric obesity measures and coronary artery disease: a cross-sectional survey of 16 657 subjects from 444 Polish cities.\" Heart 96, no. 2 (2010): 131-135.", + "[3] Kullberg, Joel, Anders Hedstrom, John Brandberg, Robin Strand, Lars Johansson, Goran Bergstrom, and Håkan Ahlström. \"Automated analysis of liver fat, muscle and adipose tissue distribution from CT suitable for large-scale studies.\" Scientific reports 7, no. 1 (2017): 10425.", + "[4] Tanaka, Muhei, Hiroshi Okada, Yoshitaka Hashimoto, Muneaki Kumagai, Hiromi Nishimura, and Michiaki Fukui. \"Distinct associations of intraperitoneal and retroperitoneal visceral adipose tissues with metabolic syndrome and its components.\" Clinical Nutrition 40, no. 5 (2021): 3479-3484..", + "[5] Tanaka, M., Okada, H., Hashimoto, Y., Kumagai, M., Nishimura, H., & Fukui, M. (2020). Intraperitoneal, but not retroperitoneal, visceral adipose tissue is associated with diabetes mellitus: a cross-sectional, retrospective pilot analysis. Diabetology & Metabolic Syndrome, 12, 1-10.", + "[6] Christen, T., Sheikine, Y., Rocha, V. 
Z., Hurwitz, S., Goldfine, A. B., Di Carli, M., & Libby, P. (2010). Increased glucose uptake in visceral versus subcutaneous adipose tissue revealed by PET imaging. JACC: Cardiovascular Imaging, 3(8), 843-851.", + "[7] Kelley, D. E., Thaete, F. L., Troost, F., Huwe, T., & Goodpaster, B. H. (2000). Subdivisions of subcutaneous abdominal adipose tissue and insulin resistance. American Journal of Physiology-Endocrinology and Metabolism, 278(5), E941-E948.", + "[8] Smith, S. R., Lovejoy, J. C., Greenway, F., Ryan, D., deJonge, L., de la Bretonne, J., ... & Bray, G. A. (2001). Contributions of total body fat, abdominal subcutaneous adipose tissue compartments, and visceral adipose tissue to the metabolic complications of obesity. Metabolism-Clinical and Experimental, 50(4), 425-435.", + "[9] Kazerouni, I. A., Dooly, G., & Toal, D. (2021). Ghost-UNet: an asymmetric encoder-decoder architecture for semantic segmentation from scratch. IEEE Access, 9, 97457-97465.", + "[10] Ma, Z., Li, C., Du, T., Zhang, L., Tang, D., Ma, D., ... & Sun, H. (2024). AATCT-IDS: A benchmark Abdominal Adipose Tissue CT Image Dataset for image denoising, semantic segmentation, and radiomics evaluation. Computers in Biology and Medicine, 177, 108628.", + "[11] Bilic, P., Christ, P., Li, H.B., Vorontsov, E., Ben-Cohen, A., Kaissis, G., Szeskin, A., Jacobs, C., Mamani, G.E.H., Chartrand, G. and Lohofer, F., 2023. The liver tumor segmentation benchmark (lits). Medical Image Analysis, 84, p.102680.", + "[12] Ronneberger, O., Fischer, P., & Brox, T. (2015). U-net: Convolutional networks for biomedical image segmentation. In Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference, Munich, Germany, October 5-9, 2015, proceedings, part III 18 (pp. 234-241). Springer International Publishing.", + "[13] Zhou, Z., Rahman Siddiquee, M. M., Tajbakhsh, N., & Liang, J. (2018). Unet++: A nested u-net architecture for medical image segmentation. 
In Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support: 4th International Workshop, DLMIA 2018, and 8th International Workshop, ML-CDS 2018, Held in Conjunction with MICCAI 2018, Granada, Spain, September 20, 2018, Proceedings 4 (pp. 3-11). Springer International Publishing.", + "[14] Rahman, H., Bukht, T. F. N., Imran, A., Tariq, J., Tu, S., & Alzahrani, A. (2022). A deep learning approach for liver and tumor segmentation in CT images using ResUNet. Bioengineering, 9(8), 368.", + "[15] Ahmad, N., Strand, R., Sparresäter, B., Tarai, S., Lundström, E., Bergström, G., Ahlström, H. and Kullberg, J., 2023. Automatic segmentation of large-scale CT image datasets for detailed body composition analysis. BMC bioinformatics, 24(1), p.346.", + "[16] Bock, S. and Weiß, M., 2019, July. A proof of local convergence for the Adam optimizer. In 2019 international joint conference on neural networks (IJCNN) (pp. 1-8). IEEE.", + "[17] Hayat, Mansoor. \"Squeeze & Excitation joint with Combined Channel and Spatial Attention for Pathology Image Super-Resolution.\" Franklin Open 8 (2024): 100170." 
+ ], + "bbox": [ + 509, + 66, + 913, + 862 + ], + "page_idx": 3 + } +] \ No newline at end of file diff --git a/data/2025/2504_11xxx/2504.11491/0400dc9e-bb51-4dac-9ac6-e38f3b9731ae_model.json b/data/2025/2504_11xxx/2504.11491/0400dc9e-bb51-4dac-9ac6-e38f3b9731ae_model.json new file mode 100644 index 0000000000000000000000000000000000000000..2e2418dbf30103a81c5341bff29fde3e3947490b --- /dev/null +++ b/data/2025/2504_11xxx/2504.11491/0400dc9e-bb51-4dac-9ac6-e38f3b9731ae_model.json @@ -0,0 +1,1165 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.262, + 0.06, + 0.718 + ], + "angle": 270, + "content": "arXiv:2504.11491v1 [eess.IV] 14 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.088, + 0.912, + 0.138 + ], + "angle": 0, + "content": "Attention GhostUNet++: Enhanced Segmentation of Adipose Tissue and Liver in CT Images" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.158, + 0.816, + 0.176 + ], + "angle": 0, + "content": "Mansoor Hayat1, Supavadee Aramvith2, Subrata Bhattacharjee3 and Nouman Ahmad4" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.203, + 0.49, + 0.444 + ], + "angle": 0, + "content": "Abstract—Accurate segmentation of abdominal adipose tissue, including subcutaneous (SAT) and visceral adipose tissue (VAT), along with liver segmentation, is essential for understanding body composition and associated health risks such as type 2 diabetes and cardiovascular disease. This study proposes Attention GhostUNet++, a novel deep learning model incorporating Channel, Spatial, and Depth Attention mechanisms into the Ghost UNet++ bottleneck for automated, precise segmentation. Evaluated on the AATTCT-IDS and LiTS datasets, the model achieved Dice coefficients of 0.9430 for VAT, 0.9639 for SAT, and 0.9652 for liver segmentation, surpassing baseline models. 
Despite minor limitations in boundary detail segmentation, the proposed model significantly enhances feature refinement, contextual understanding, and computational efficiency, offering a robust solution for body composition analysis. The implementation of the proposed Attention GhostUNet++ model is available at: https://github.com/MansoorHayat777/Attention-GhostUNetPlusPlus." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.455, + 0.49, + 0.584 + ], + "angle": 0, + "content": "Clinical relevance—The Attention GhostUNet++ model offers a significant advancement in the automated segmentation of adipose tissue and liver regions from CT images. Accurate delineation of visceral and subcutaneous adipose tissue, alongside liver structures, is critical for clinicians managing cardiometabolic disorders, including type 2 diabetes and cardiovascular diseases. By reducing reliance on manual annotations, the model enhances efficiency and scalability, paving the way for its integration into routine clinical workflows and large-scale body composition studies." + }, + { + "type": "title", + "bbox": [ + 0.22, + 0.596, + 0.353, + 0.609 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.617, + 0.49, + 0.784 + ], + "angle": 0, + "content": "Obesity is a significant risk factor for cardiometabolic diseases, including type 2 diabetes (T2D), cardiovascular disease (CVD), non-alcoholic fatty liver disease, and hypertension [1] [2]. Body composition (BC) analysis focuses on the distribution of fatty and non-fatty tissues, particularly in depots such as adipose tissue, muscle, liver, and bone, playing a crucial role in predicting and preventing these diseases [3]. Among adipose tissue compartments, visceral adipose tissue (VAT) and subcutaneous adipose tissue (SAT) are key. VAT, located within the abdominal cavity [4], [5]. Similarly, SAT, located beneath the skin [3] [6], [7], [8], [17]." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.784, + 0.49, + 0.89 + ], + "angle": 0, + "content": "This study introduces Attention GhostUNet++, an advanced deep learning model that incorporates Channel, Spatial, and Depth Attention mechanisms into the Ghost UNet++ bottleneck for automated segmentation of VAT, SAT, and liver regions from CT images. Using the AATTCT-IDS [10] and LiTS [11] datasets, the model achieved Dice coefficients of 0.9430 (VAT), 0.9639 (SAT), and 0.9652 (liver)," + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.902, + 0.49, + 0.927 + ], + "angle": 0, + "content": "This research is funded by Thailand Science Research and Innovation Fund Chulalongkorn University (IND_FF_68_280_2100_039)." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.202, + 0.913, + 0.262 + ], + "angle": 0, + "content": "outperforming baseline models in most metrics. However, challenges remain in SAT and VAT boundary segmentation, where Jaccard indices of 0.9639 (SAT) and 0.9430 (VAT) fell short of UNet's performance in specific cases." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.263, + 0.913, + 0.354 + ], + "angle": 0, + "content": "The proposed model reduces reliance on manual annotation, providing an efficient, scalable solution for BC analysis. It addresses challenges in large-scale imaging studies, paving the way for broader applications in multi-class segmentation and personalized healthcare, with future work focusing on boundary refinement and dataset generalization." + }, + { + "type": "title", + "bbox": [ + 0.651, + 0.362, + 0.768, + 0.375 + ], + "angle": 0, + "content": "METHODOLOGY" + }, + { + "type": "title", + "bbox": [ + 0.508, + 0.381, + 0.608, + 0.395 + ], + "angle": 0, + "content": "Ghost Module" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.4, + 0.913, + 0.536 + ], + "angle": 0, + "content": "The Ghost-UNet [9] architecture is designed to enhance computational efficiency by minimizing redundancy in feature maps. 
Traditional convolution operations are computationally intensive for generating high-dimensional feature maps. The Ghost module addresses this limitation by employing lightweight operations to generate ghost feature maps, which are lower-resolution representations of the input features. This significantly improves efficiency without sacrificing accuracy." + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.536, + 0.869, + 0.551 + ], + "angle": 0, + "content": "The Ghost module is mathematically expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.642, + 0.559, + 0.776, + 0.576 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {o} = \\gamma (\\mathbf {W} * \\mathbf {F} _ {i}) + \\beta\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.584, + 0.557, + 0.597 + ], + "angle": 0, + "content": "where:" + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.601, + 0.811, + 0.616 + ], + "angle": 0, + "content": "- \\(\\mathbf{F}_o\\): High-resolution output feature map," + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.617, + 0.799, + 0.631 + ], + "angle": 0, + "content": "- \\(\\mathbf{F}_i\\): Low-resolution ghost feature map," + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.632, + 0.872, + 0.646 + ], + "angle": 0, + "content": "- W: Weight tensor of the ghost convolution layer," + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.647, + 0.857, + 0.662 + ], + "angle": 0, + "content": "- \\(\\gamma\\) and \\(\\beta\\): Learnable scale and shift parameters," + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.663, + 0.755, + 0.676 + ], + "angle": 0, + "content": "\\(\\star\\) Ghost convolution operation." 
+ }, + { + "type": "list", + "bbox": [ + 0.523, + 0.601, + 0.872, + 0.676 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.679, + 0.888, + 0.694 + ], + "angle": 0, + "content": "The ghost convolution operation is further defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.612, + 0.7, + 0.808, + 0.728 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {i} [ m ] = \\sum_ {n} W [ n, m ] * \\mathbf {X} [ n ] + b\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.733, + 0.913, + 0.763 + ], + "angle": 0, + "content": "where \\( m \\) represents the ghost channels, \\( n \\) is the input feature map index, and \\( b \\) denotes the bias term." + }, + { + "type": "title", + "bbox": [ + 0.506, + 0.772, + 0.759, + 0.786 + ], + "angle": 0, + "content": "Attention GhostUNet++ Architecture" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.79, + 0.913, + 0.927 + ], + "angle": 0, + "content": "In the proposed Attention GhostUNet++, Ghost bottleneck layers are integrated into the UNet++ architecture, each enhanced with Channel, Spatial, and Depth Attention mechanisms. These mechanisms improve feature refinement by dynamically emphasizing relevant regions in the feature maps while suppressing redundant information. The network consists of 15 bottleneck layers arranged within a nested architecture that supports robust contraction and expansion paths:" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.099, + 0.066, + 0.918, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.363, + 0.457, + 0.635, + 0.471 + ], + "angle": 0, + "content": "Fig. 1. Attention GhostUNet++ Architecture." + }, + { + "type": "text", + "bbox": [ + 0.099, + 0.499, + 0.487, + 0.527 + ], + "angle": 0, + "content": "- Contraction Path: Extracts features at multiple resolutions." 
+ }, + { + "type": "text", + "bbox": [ + 0.099, + 0.53, + 0.488, + 0.558 + ], + "angle": 0, + "content": "- Expansion Path: Reconstructs feature maps for precise segmentation." + }, + { + "type": "list", + "bbox": [ + 0.099, + 0.499, + 0.488, + 0.558 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.562, + 0.49, + 0.589 + ], + "angle": 0, + "content": "Each bottleneck layer in the network can be represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.221, + 0.591, + 0.352, + 0.608 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {o} = \\mathcal {A} (\\mathcal {G} (\\mathbf {F} _ {i}, \\Theta))\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.615, + 0.134, + 0.628 + ], + "angle": 0, + "content": "where:" + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.633, + 0.229, + 0.647 + ], + "angle": 0, + "content": "- \\(\\mathbf{F}_i\\): Input tensor," + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.648, + 0.243, + 0.661 + ], + "angle": 0, + "content": "- \\(\\mathbf{F}_o\\): Output tensor," + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.663, + 0.299, + 0.677 + ], + "angle": 0, + "content": "\\(\\mathcal{G}\\) : Ghost bottleneck layer," + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.679, + 0.294, + 0.691 + ], + "angle": 0, + "content": "\\(\\mathcal{A}\\) : Attention mechanism," + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.693, + 0.329, + 0.707 + ], + "angle": 0, + "content": "- \\(\\Theta\\): Set of learnable parameters." 
+ }, + { + "type": "list", + "bbox": [ + 0.1, + 0.633, + 0.329, + 0.707 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.71, + 0.489, + 0.739 + ], + "angle": 0, + "content": "At each hierarchical level \\( l \\) in the network, the feature maps are calculated as:" + }, + { + "type": "equation", + "bbox": [ + 0.171, + 0.747, + 0.402, + 0.765 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {l} = \\mathcal {P} (\\mathcal {G} (\\mathbf {F} _ {l - 1})) + \\mathcal {U} (\\mathcal {G} (\\mathbf {F} _ {l + 1}))\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.772, + 0.134, + 0.784 + ], + "angle": 0, + "content": "where:" + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.789, + 0.272, + 0.803 + ], + "angle": 0, + "content": "\\(\\mathcal{P}\\) : Pooling operation," + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.804, + 0.305, + 0.818 + ], + "angle": 0, + "content": "\\(\\mathcal{U}\\) : Up-sampling operation," + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.819, + 0.241, + 0.832 + ], + "angle": 0, + "content": "\\(\\mathcal{G}\\) : Ghost module." + }, + { + "type": "list", + "bbox": [ + 0.1, + 0.789, + 0.305, + 0.832 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.836, + 0.337, + 0.85 + ], + "angle": 0, + "content": "The final output of the network is:" + }, + { + "type": "equation", + "bbox": [ + 0.197, + 0.859, + 0.376, + 0.875 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {\\text {f i n a l}} = \\mathbf {F} _ {1} + \\mathbf {F} _ {2} + \\dots + \\mathbf {F} _ {n}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.882, + 0.49, + 0.927 + ], + "angle": 0, + "content": "where \\( n \\) is the number of hierarchical levels in the architecture. 
This innovative integration of Ghost bottleneck layers with attention mechanisms enables Attention GhostUNet++" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.499, + 0.914, + 0.545 + ], + "angle": 0, + "content": "to achieve high accuracy, computational efficiency, and improved feature refinement, making it a robust solution for medical image segmentation." + }, + { + "type": "title", + "bbox": [ + 0.606, + 0.559, + 0.816, + 0.572 + ], + "angle": 0, + "content": "II. EXPERIMENTAL RESULTS" + }, + { + "type": "title", + "bbox": [ + 0.507, + 0.581, + 0.571, + 0.594 + ], + "angle": 0, + "content": "Datasets" + }, + { + "type": "title", + "bbox": [ + 0.507, + 0.602, + 0.698, + 0.617 + ], + "angle": 0, + "content": "Datasets and Preprocessing" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.623, + 0.914, + 0.713 + ], + "angle": 0, + "content": "This study evaluates the proposed Attention GhostUNet++ model using two datasets: the Abdominal Adipose Tissue CT Image Dataset (AATTCT-IDS) [10] and the Liver Tumor Segmentation Benchmark (LiTS) [11]. These datasets provide annotated CT images for segmenting SAT, VAT, and liver regions." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.714, + 0.914, + 0.82 + ], + "angle": 0, + "content": "The AATTCT-IDS dataset [10] includes 13,732 CT slices (3,213 annotated) from 300 subjects, focusing on SAT and VAT. Challenges arise from individual variability and overlapping boundaries between these compartments. The LiTS dataset [11] contains 201 CT volumes annotated for liver regions, featuring diverse liver shapes and pathologies. Liver segmentation was prioritized in this study." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.821, + 0.913, + 0.896 + ], + "angle": 0, + "content": "Preprocessing included resizing CT slices to a consistent resolution, normalizing pixel intensities, and cropping volumes to focus on regions of interest. 
Data augmentation (e.g., rotations, flipping, scaling) was applied to enhance variability and reduce overfitting." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.897, + 0.914, + 0.927 + ], + "angle": 0, + "content": "These datasets were critical for validating the robustness and generalizability of the Attention GhostUNet++ model" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.09, + 0.062, + 0.911, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.39, + 0.916, + 0.427 + ], + "angle": 0, + "content": "Fig. 2. Segmentation results for randomly selected CT image examples, the columns represent (1) the original CT image, (2) ground truth annotations, (3) model-predicted segmentation output, (4) mask differences between ground truth and predictions, (5) predicted segmentation masks overlaid on the original CT image." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.454, + 0.49, + 0.483 + ], + "angle": 0, + "content": "in segmenting complex anatomical structures across varying scenarios." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.494, + 0.204, + 0.509 + ], + "angle": 0, + "content": "Training Settings" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.513, + 0.49, + 0.618 + ], + "angle": 0, + "content": "The proposed Attention GhostUNet++ model was implemented in PyTorch 2.0 and trained on an Nvidia 3090Ti GPU. Xavier initialization ensured effective weight scaling. The datasets were split into training, validation, and test sets (70:20:10), with data augmentation (rotations, flipping, scaling, and intensity variations) applied to improve generalization." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.619, + 0.49, + 0.709 + ], + "angle": 0, + "content": "Training used the Adam optimizer [16] \\((1\\times 10^{-4}\\) initial learning rate, cosine annealing decay) and a combined Dice and cross-entropy loss. 
Mini-batches of size 16 optimized GPU usage, while early stopping (100-epoch patience) prevented overfitting. The best-performing model was selected based on the validation Dice coefficient (DC)." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.71, + 0.49, + 0.74 + ], + "angle": 0, + "content": "Segmentation accuracy was evaluated using the DC and Jaccard index (JI):" + }, + { + "type": "equation", + "bbox": [ + 0.136, + 0.745, + 0.437, + 0.78 + ], + "angle": 0, + "content": "\\[\n\\text {D i c e} = \\frac {2 | \\mathbf {P} \\cap \\mathbf {G} |}{| \\mathbf {P} | + | \\mathbf {G} |} = \\frac {2 \\sum_ {i} P _ {i} G _ {i}}{\\sum_ {i} P _ {i} + \\sum_ {i} G _ {i}}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.119, + 0.784, + 0.454, + 0.818 + ], + "angle": 0, + "content": "\\[\n\\text {J a c c a r d I n d e x} = \\frac {\\left| \\mathbf {P} \\cap \\mathbf {G} \\right|}{\\left| \\mathbf {P} \\cup \\mathbf {G} \\right|} = \\frac {\\sum_ {i} P _ {i} G _ {i}}{\\sum_ {i} P _ {i} + \\sum_ {i} G _ {i} - \\sum_ {i} P _ {i} G _ {i}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.822, + 0.489, + 0.854 + ], + "angle": 0, + "content": "where \\(P_{i}\\) and \\(G_{i}\\) represent the predicted and ground truth segmentation at pixel \\(i\\), respectively." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.863, + 0.251, + 0.877 + ], + "angle": 0, + "content": "Performance Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.882, + 0.49, + 0.927 + ], + "angle": 0, + "content": "The performance of the proposed Attention GhostUNet++ model was compared against baseline models (UNet [12], UNet++ [13], ResUNet [14] and GhostUNet++" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.454, + 0.913, + 0.499 + ], + "angle": 0, + "content": "[15] for VAT, SAT, and liver. Evaluation metrics included the DC and JI, which assess segmentation accuracy and overlap, respectively." 
+ }, + { + "type": "text", + "bbox": [ + 0.506, + 0.5, + 0.914, + 0.636 + ], + "angle": 0, + "content": "The proposed model outperformed the baseline models in most cases. As indicated in Table I, for VAT segmentation, it achieved a DC of 0.9430 and a JI of 0.9430, closely competing with UNet's JI of 0.9491. In SAT segmentation, it achieved a DC of 0.9639, matching UNet's JI of 0.9807 but indicating room for improvement in handling boundary details. For liver segmentation, Attention GhostUNet++ demonstrated superior performance with a Dice coefficient of 0.9652 and a JI of 0.9496, outperforming all baselines." + }, + { + "type": "table_caption", + "bbox": [ + 0.516, + 0.649, + 0.905, + 0.692 + ], + "angle": 0, + "content": "TABLEI MEAN SEGMENTATION DICE AND JACCARD SCORES FOR DIFFERENT TARGETS IN AATTCT-IDS [10] AND LITS [11] DATASETS" + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.704, + 0.93, + 0.835 + ], + "angle": 0, + "content": "
MethodMetricsVATSATLiver
UNet [12]Dice coefficient0.90570.96040.8746
Jaccard index0.94910.98070.8456
UNet++ [13]Dice coefficient0.87420.87410.9468
Jaccard index0.81570.86390.9311
ResUNet [14]Dice coefficient0.91840.94820.9587
Jaccard index0.90210.96530.9412
GhostUNet++ [15]Dice coefficient0.88470.89160.9554
Jaccard index0.79160.84510.9318
Attention GhostUNet++Dice coefficient0.94300.96390.9652
Jaccard index0.94300.96390.9496
" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.852, + 0.914, + 0.927 + ], + "angle": 0, + "content": "These results confirm the effectiveness of the Attention GhostUNet++ [15] model for medical image segmentation. While achieving state-of-the-art performance across most tasks, minor limitations in SAT segmentation suggest opportunities for further refinement to enhance boundary accuracy" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.084, + 0.067, + 0.274, + 0.081 + ], + "angle": 0, + "content": "and overall generalizability." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.082, + 0.49, + 0.232 + ], + "angle": 0, + "content": "Fig. 2 showcases example segmentation outputs from the test dataset. Ground truth annotations are displayed alongside predictions from each model. The Attention GhostUNet++ [15] model exhibits superior boundary adherence and accurate region segmentation, particularly in challenging cases where SAT and VAT boundaries overlap or are difficult to distinguish. Additionally, for liver segmentation, the proposed model effectively captures the organ's irregular contours, delivering results that clearly outperform those of the baseline models." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.233, + 0.49, + 0.295 + ], + "angle": 0, + "content": "These visual comparisons underscore the model's capability to handle complex segmentation tasks with enhanced precision and boundary accuracy, making it a robust solution for medical imaging applications." + }, + { + "type": "title", + "bbox": [ + 0.221, + 0.306, + 0.353, + 0.32 + ], + "angle": 0, + "content": "III. CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.327, + 0.49, + 0.417 + ], + "angle": 0, + "content": "We proposed Attention GhostUNet++, a novel deep learning architecture for segmenting SAT, VAT, and liver regions in CT images. 
By integrating Channel, Spatial, and Depth Attention mechanisms into Ghost-Net bottleneck layers, the model achieves enhanced feature refinement and contextual understanding with computational efficiency." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.418, + 0.49, + 0.508 + ], + "angle": 0, + "content": "Experiments on AATTCT-IDS[10] and LiTS[11] datasets demonstrated state-of-the-art performance, with DCs of 0.9430 (VAT), 0.9639 (SAT), and 0.9652 (liver). Visual comparisons highlighted the model's ability to accurately segment complex anatomical structures, reducing boundary errors and outperforming baseline models." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.509, + 0.49, + 0.6 + ], + "angle": 0, + "content": "This automated solution reduces reliance on manual annotations and enhances scalability for clinical and research applications. Future work will focus on addressing limitations, extending to multi-class tasks, and validating across diverse datasets, paving the way for efficient and accurate medical imaging tools." + }, + { + "type": "title", + "bbox": [ + 0.165, + 0.612, + 0.409, + 0.626 + ], + "angle": 0, + "content": "LIMITATIONS AND FUTURE WORK" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.633, + 0.491, + 0.845 + ], + "angle": 0, + "content": "The proposed Attention GhostUNet++ model faces minor limitations in handling fine-grained boundaries, particularly for SAT segmentation, and its generalizability to diverse imaging modalities remains untested. It also requires further evaluation on datasets with significant variations in anatomical structures and imaging quality. Future work will focus on enhancing boundary detection, expanding validation to multi-class tasks and diverse datasets, and optimizing computational efficiency for clinical deployment. Additionally, integrating advanced techniques, such as edge-aware attention, could improve performance in challenging regions. 
Extending the model to segment complex pathological structures will further enhance its utility in diagnostic and therapeutic applications." + }, + { + "type": "title", + "bbox": [ + 0.239, + 0.857, + 0.334, + 0.87 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.879, + 0.49, + 0.927 + ], + "angle": 0, + "content": "[1] Mokdad, Ali H., Earl S. Ford, Barbara A. Bowman, William H. Dietz, Frank Vinicor, Virginia S. Bales, and James S. Marks. \"Prevalence of obesity, diabetes, and obesity-related health risk factors, 2001.\" Jama 289, no. 1 (2003): 76-79." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.068, + 0.913, + 0.126 + ], + "angle": 0, + "content": "[2] Kaess, Bernhard M., Jacek Jozwiak, Miroslaw Mastej, Witold Lukas, Wladyslaw Grzeseczak, Adam Windak, Wieslawa Piwowarska et al. \"Association between anthropometric obesity measures and coronary artery disease: a cross-sectional survey of 16 657 subjects from 444 Polish cities.\" Heart 96, no. 2 (2010): 131-135." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.127, + 0.914, + 0.182 + ], + "angle": 0, + "content": "[3] Kullberg, Joel, Anders Hedstrom, John Brandberg, Robin Strand, Lars Johansson, Goran Bergstrom, and Håkan Ahlström. \"Automated analysis of liver fat, muscle and adipose tissue distribution from CT suitable for large-scale studies.\" Scientific reports 7, no. 1 (2017): 10425." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.182, + 0.913, + 0.239 + ], + "angle": 0, + "content": "[4] Tanaka, Muhei, Hiroshi Okada, Yoshitaka Hashimoto, Muneaki Kumagai, Hiromi Nishimura, and Michiaki Fukui. \"Distinct associations of intraperitoneal and retroperitoneal visceral adipose tissues with metabolic syndrome and its components.\" Clinical Nutrition 40, no. 5 (2021): 3479-3484.." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.239, + 0.913, + 0.295 + ], + "angle": 0, + "content": "[5] Tanaka, M., Okada, H., Hashimoto, Y., Kumagai, M., Nishimura, H., & Fukui, M. (2020). Intraperitoneal, but not retroperitoneal, visceral adipose tissue is associated with diabetes mellitus: a cross-sectional, retrospective pilot analysis. Diabetology & Metabolic Syndrome, 12, 1-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.295, + 0.913, + 0.342 + ], + "angle": 0, + "content": "[6] Christen, T., Sheikine, Y., Rocha, V. Z., Hurwitz, S., Goldfine, A. B., Di Carli, M., & Libby, P. (2010). Increased glucose uptake in visceral versus subcutaneous adipose tissue revealed by PET imaging. JACC: Cardiovascular Imaging, 3(8), 843-851." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.342, + 0.913, + 0.387 + ], + "angle": 0, + "content": "[7] Kelley, D. E., Thaete, F. L., Troost, F., Huwe, T., & Goodpaster, B. H. (2000). Subdivisions of subcutaneous abdominal adipose tissue and insulin resistance. American Journal of Physiology-Endocrinology and Metabolism, 278(5), E941-E948." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.387, + 0.913, + 0.443 + ], + "angle": 0, + "content": "[8] Smith, S. R., Lovejoy, J. C., Greenway, F., Ryan, D., deJonge, L., de la Bretonne, J., ... & Bray, G. A. (2001). Contributions of total body fat, abdominal subcutaneous adipose tissue compartments, and visceral adipose tissue to the metabolic complications of obesity. Metabolism-Clinical and Experimental, 50(4), 425-435." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.443, + 0.913, + 0.477 + ], + "angle": 0, + "content": "[9] Kazerouni, I. A., Dooly, G., & Toal, D. (2021). Ghost-UNet: an asymmetric encoder-decoder architecture for semantic segmentation from scratch. IEEE Access, 9, 97457-97465." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.477, + 0.913, + 0.523 + ], + "angle": 0, + "content": "[10] Ma, Z., Li, C., Du, T., Zhang, L., Tang, D., Ma, D., ... & Sun, H. (2024). AATCT-IDS: A benchmark Abdominal Adipose Tissue CT Image Dataset for image denoising, semantic segmentation, and radiomics evaluation. Computers in Biology and Medicine, 177, 108628." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.523, + 0.913, + 0.568 + ], + "angle": 0, + "content": "[11] Bilic, P., Christ, P., Li, H.B., Vorontsov, E., Ben-Cohen, A., Kaissis, G., Szeskin, A., Jacobs, C., Mamani, G.E.H., Chartrand, G. and Lohofer, F., 2023. The liver tumor segmentation benchmark (lits). Medical Image Analysis, 84, p.102680." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.568, + 0.913, + 0.625 + ], + "angle": 0, + "content": "[12] Ronneberger, O., Fischer, P., & Brox, T. (2015). U-net: Convolutional networks for biomedical image segmentation. In Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference, Munich, Germany, October 5-9, 2015, proceedings, part III 18 (pp. 234-241). Springer International Publishing." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.625, + 0.913, + 0.715 + ], + "angle": 0, + "content": "[13] Zhou, Z., Rahman Siddiquee, M. M., Tajbakhsh, N., & Liang, J. (2018). Unet++: A nested u-net architecture for medical image segmentation. In Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support: 4th International Workshop, DLMIA 2018, and 8th International Workshop, ML-CDS 2018, Held in Conjunction with MICCAI 2018, Granada, Spain, September 20, 2018, Proceedings 4 (pp. 3-11). Springer International Publishing." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.715, + 0.913, + 0.749 + ], + "angle": 0, + "content": "[14] Rahman, H., Bukht, T. F. N., Imran, A., Tariq, J., Tu, S., & Alzahrani, A. (2022). 
A deep learning approach for liver and tumor segmentation in CT images using ResUNet. Bioengineering, 9(8), 368." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.749, + 0.913, + 0.795 + ], + "angle": 0, + "content": "[15] Ahmad, N., Strand, R., Sparresäter, B., Tarai, S., Lundström, E., Bergström, G., Ahlström, H. and Kullberg, J., 2023. Automatic segmentation of large-scale CT image datasets for detailed body composition analysis. BMC bioinformatics, 24(1), p.346." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.794, + 0.913, + 0.828 + ], + "angle": 0, + "content": "[16] Bock, S. and Weiß, M., 2019, July. A proof of local convergence for the Adam optimizer. In 2019 international joint conference on neural networks (IJCNN) (pp. 1-8). IEEE." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.828, + 0.913, + 0.863 + ], + "angle": 0, + "content": "[17] Hayat, Mansoor. \"Squeeze & Excitation joint with Combined Channel and Spatial Attention for Pathology Image Super-Resolution.\" Franklin Open 8 (2024): 100170." 
+ }, + { + "type": "list", + "bbox": [ + 0.511, + 0.068, + 0.914, + 0.863 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_11xxx/2504.11491/0400dc9e-bb51-4dac-9ac6-e38f3b9731ae_origin.pdf b/data/2025/2504_11xxx/2504.11491/0400dc9e-bb51-4dac-9ac6-e38f3b9731ae_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..fb7b02b0c91c44dd76c230677690dcd858b792e7 --- /dev/null +++ b/data/2025/2504_11xxx/2504.11491/0400dc9e-bb51-4dac-9ac6-e38f3b9731ae_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07a8defe260a21c8e484941f9c6266786858c1beae427a00c95da2fc4997297a +size 2594754 diff --git a/data/2025/2504_11xxx/2504.11491/full.md b/data/2025/2504_11xxx/2504.11491/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8bf5c05e0e1cf5b4dc170d3e99fa640efd3d1b56 --- /dev/null +++ b/data/2025/2504_11xxx/2504.11491/full.md @@ -0,0 +1,183 @@ +# Attention GhostUNet++: Enhanced Segmentation of Adipose Tissue and Liver in CT Images + +Mansoor Hayat1, Supavadee Aramvith2, Subrata Bhattacharjee3 and Nouman Ahmad4 + +Abstract—Accurate segmentation of abdominal adipose tissue, including subcutaneous (SAT) and visceral adipose tissue (VAT), along with liver segmentation, is essential for understanding body composition and associated health risks such as type 2 diabetes and cardiovascular disease. This study proposes Attention GhostUNet++, a novel deep learning model incorporating Channel, Spatial, and Depth Attention mechanisms into the Ghost UNet++ bottleneck for automated, precise segmentation. Evaluated on the AATTCT-IDS and LiTS datasets, the model achieved Dice coefficients of 0.9430 for VAT, 0.9639 for SAT, and 0.9652 for liver segmentation, surpassing baseline models. 
Despite minor limitations in boundary detail segmentation, the proposed model significantly enhances feature refinement, contextual understanding, and computational efficiency, offering a robust solution for body composition analysis. The implementation of the proposed Attention GhostUNet++ model is available at: https://github.com/MansoorHayat777/Attention-GhostUNetPlusPlus. + +Clinical relevance—The Attention GhostUNet++ model offers a significant advancement in the automated segmentation of adipose tissue and liver regions from CT images. Accurate delineation of visceral and subcutaneous adipose tissue, alongside liver structures, is critical for clinicians managing cardiometabolic disorders, including type 2 diabetes and cardiovascular diseases. By reducing reliance on manual annotations, the model enhances efficiency and scalability, paving the way for its integration into routine clinical workflows and large-scale body composition studies. + +# I. INTRODUCTION + +Obesity is a significant risk factor for cardiometabolic diseases, including type 2 diabetes (T2D), cardiovascular disease (CVD), non-alcoholic fatty liver disease, and hypertension [1] [2]. Body composition (BC) analysis focuses on the distribution of fatty and non-fatty tissues, particularly in depots such as adipose tissue, muscle, liver, and bone, playing a crucial role in predicting and preventing these diseases [3]. Among adipose tissue compartments, visceral adipose tissue (VAT) and subcutaneous adipose tissue (SAT) are key. VAT, located within the abdominal cavity [4], [5]. Similarly, SAT, located beneath the skin [3] [6], [7], [8], [17]. + +This study introduces Attention GhostUNet++, an advanced deep learning model that incorporates Channel, Spatial, and Depth Attention mechanisms into the Ghost UNet++ bottleneck for automated segmentation of VAT, SAT, and liver regions from CT images. 
Using the AATTCT-IDS [10] and LiTS [11] datasets, the model achieved Dice coefficients of 0.9430 (VAT), 0.9639 (SAT), and 0.9652 (liver), + +This research is funded by Thailand Science Research and Innovation Fund Chulalongkorn University (IND_FF_68_280_2100_039). + +outperforming baseline models in most metrics. However, challenges remain in SAT and VAT boundary segmentation, where Jaccard indices of 0.9639 (SAT) and 0.9430 (VAT) fell short of UNet's performance in specific cases. + +The proposed model reduces reliance on manual annotation, providing an efficient, scalable solution for BC analysis. It addresses challenges in large-scale imaging studies, paving the way for broader applications in multi-class segmentation and personalized healthcare, with future work focusing on boundary refinement and dataset generalization. + +# METHODOLOGY + +# Ghost Module + +The Ghost-UNet [9] architecture is designed to enhance computational efficiency by minimizing redundancy in feature maps. Traditional convolution operations are computationally intensive for generating high-dimensional feature maps. The Ghost module addresses this limitation by employing lightweight operations to generate ghost feature maps, which are lower-resolution representations of the input features. This significantly improves efficiency without sacrificing accuracy. + +The Ghost module is mathematically expressed as: + +$$ +\mathbf {F} _ {o} = \gamma (\mathbf {W} * \mathbf {F} _ {i}) + \beta +$$ + +where: + +- $\mathbf{F}_o$ : High-resolution output feature map, +- $\mathbf{F}_i$ : Low-resolution ghost feature map, +- W: Weight tensor of the ghost convolution layer, +- $\gamma$ and $\beta$ : Learnable scale and shift parameters, +$\star$ Ghost convolution operation. 
+ +The ghost convolution operation is further defined as: + +$$ +\mathbf {F} _ {i} [ m ] = \sum_ {n} W [ n, m ] * \mathbf {X} [ n ] + b +$$ + +where $m$ represents the ghost channels, $n$ is the input feature map index, and $b$ denotes the bias term. + +# Attention GhostUNet++ Architecture + +In the proposed Attention GhostUNet++, Ghost bottleneck layers are integrated into the UNet++ architecture, each enhanced with Channel, Spatial, and Depth Attention mechanisms. These mechanisms improve feature refinement by dynamically emphasizing relevant regions in the feature maps while suppressing redundant information. The network consists of 15 bottleneck layers arranged within a nested architecture that supports robust contraction and expansion paths: + +![](images/5079be29d2c99a728fa0fdb107927d0284c0f855bf5a5d187f005a1314ab8877.jpg) +Fig. 1. Attention GhostUNet++ Architecture. + +- Contraction Path: Extracts features at multiple resolutions. +- Expansion Path: Reconstructs feature maps for precise segmentation. + +Each bottleneck layer in the network can be represented as: + +$$ +\mathbf {F} _ {o} = \mathcal {A} (\mathcal {G} (\mathbf {F} _ {i}, \Theta)) +$$ + +# where: + +- $\mathbf{F}_i$ : Input tensor, +- $\mathbf{F}_o$ : Output tensor, +$\mathcal{G}$ : Ghost bottleneck layer, +$\mathcal{A}$ : Attention mechanism, +- $\Theta$ : Set of learnable parameters. + +At each hierarchical level $l$ in the network, the feature maps are calculated as: + +$$ +\mathbf {F} _ {l} = \mathcal {P} (\mathcal {G} (\mathbf {F} _ {l - 1})) + \mathcal {U} (\mathcal {G} (\mathbf {F} _ {l + 1})) +$$ + +# where: + +$\mathcal{P}$ : Pooling operation, +$\mathcal{U}$ : Up-sampling operation, +$\mathcal{G}$ : Ghost module. + +The final output of the network is: + +$$ +\mathbf {F} _ {\text {f i n a l}} = \mathbf {F} _ {1} + \mathbf {F} _ {2} + \dots + \mathbf {F} _ {n} +$$ + +where $n$ is the number of hierarchical levels in the architecture. 
This innovative integration of Ghost bottleneck layers with attention mechanisms enables Attention GhostUNet++ + +to achieve high accuracy, computational efficiency, and improved feature refinement, making it a robust solution for medical image segmentation. + +# II. EXPERIMENTAL RESULTS + +# Datasets + +# Datasets and Preprocessing + +This study evaluates the proposed Attention GhostUNet++ model using two datasets: the Abdominal Adipose Tissue CT Image Dataset (AATTCT-IDS) [10] and the Liver Tumor Segmentation Benchmark (LiTS) [11]. These datasets provide annotated CT images for segmenting SAT, VAT, and liver regions. + +The AATTCT-IDS dataset [10] includes 13,732 CT slices (3,213 annotated) from 300 subjects, focusing on SAT and VAT. Challenges arise from individual variability and overlapping boundaries between these compartments. The LiTS dataset [11] contains 201 CT volumes annotated for liver regions, featuring diverse liver shapes and pathologies. Liver segmentation was prioritized in this study. + +Preprocessing included resizing CT slices to a consistent resolution, normalizing pixel intensities, and cropping volumes to focus on regions of interest. Data augmentation (e.g., rotations, flipping, scaling) was applied to enhance variability and reduce overfitting. + +These datasets were critical for validating the robustness and generalizability of the Attention GhostUNet++ model + +![](images/7b5c57a797698a60272f63df9d03e910ea3876702b26ae0df2950ec47105d257.jpg) +Fig. 2. Segmentation results for randomly selected CT image examples, the columns represent (1) the original CT image, (2) ground truth annotations, (3) model-predicted segmentation output, (4) mask differences between ground truth and predictions, (5) predicted segmentation masks overlaid on the original CT image. + +in segmenting complex anatomical structures across varying scenarios. 
+ +# Training Settings + +The proposed Attention GhostUNet++ model was implemented in PyTorch 2.0 and trained on an Nvidia 3090Ti GPU. Xavier initialization ensured effective weight scaling. The datasets were split into training, validation, and test sets (70:20:10), with data augmentation (rotations, flipping, scaling, and intensity variations) applied to improve generalization. + +Training used the Adam optimizer [16] $(1\times 10^{-4}$ initial learning rate, cosine annealing decay) and a combined Dice and cross-entropy loss. Mini-batches of size 16 optimized GPU usage, while early stopping (100-epoch patience) prevented overfitting. The best-performing model was selected based on the validation Dice coefficient (DC). + +Segmentation accuracy was evaluated using the DC and Jaccard index (JI): + +$$ +\text {D i c e} = \frac {2 | \mathbf {P} \cap \mathbf {G} |}{| \mathbf {P} | + | \mathbf {G} |} = \frac {2 \sum_ {i} P _ {i} G _ {i}}{\sum_ {i} P _ {i} + \sum_ {i} G _ {i}} +$$ + +$$ +\text {J a c c a r d I n d e x} = \frac {\left| \mathbf {P} \cap \mathbf {G} \right|}{\left| \mathbf {P} \cup \mathbf {G} \right|} = \frac {\sum_ {i} P _ {i} G _ {i}}{\sum_ {i} P _ {i} + \sum_ {i} G _ {i} - \sum_ {i} P _ {i} G _ {i}} +$$ + +where $P_{i}$ and $G_{i}$ represent the predicted and ground truth segmentation at pixel $i$ , respectively. + +# Performance Evaluation + +The performance of the proposed Attention GhostUNet++ model was compared against baseline models (UNet [12], UNet++ [13], ResUNet [14] and GhostUNet++ + +[15] for VAT, SAT, and liver. Evaluation metrics included the DC and JI, which assess segmentation accuracy and overlap, respectively. + +The proposed model outperformed the baseline models in most cases. As indicated in Table I, for VAT segmentation, it achieved a DC of 0.9430 and a JI of 0.9430, closely competing with UNet's JI of 0.9491. 
In SAT segmentation, it achieved a DC of 0.9639, matching UNet's JI of 0.9807 but indicating room for improvement in handling boundary details. For liver segmentation, Attention GhostUNet++ demonstrated superior performance with a Dice coefficient of 0.9652 and a JI of 0.9496, outperforming all baselines. + +TABLEI MEAN SEGMENTATION DICE AND JACCARD SCORES FOR DIFFERENT TARGETS IN AATTCT-IDS [10] AND LITS [11] DATASETS + +
MethodMetricsVATSATLiver
UNet [12]Dice coefficient0.90570.96040.8746
Jaccard index0.94910.98070.8456
UNet++ [13]Dice coefficient0.87420.87410.9468
Jaccard index0.81570.86390.9311
ResUNet [14]Dice coefficient0.91840.94820.9587
Jaccard index0.90210.96530.9412
GhostUNet++ [15]Dice coefficient0.88470.89160.9554
Jaccard index0.79160.84510.9318
Attention GhostUNet++Dice coefficient0.94300.96390.9652
Jaccard index0.94300.96390.9496
+ +These results confirm the effectiveness of the Attention GhostUNet++ [15] model for medical image segmentation. While achieving state-of-the-art performance across most tasks, minor limitations in SAT segmentation suggest opportunities for further refinement to enhance boundary accuracy + +and overall generalizability. + +Fig. 2 showcases example segmentation outputs from the test dataset. Ground truth annotations are displayed alongside predictions from each model. The Attention GhostUNet++ [15] model exhibits superior boundary adherence and accurate region segmentation, particularly in challenging cases where SAT and VAT boundaries overlap or are difficult to distinguish. Additionally, for liver segmentation, the proposed model effectively captures the organ's irregular contours, delivering results that clearly outperform those of the baseline models. + +These visual comparisons underscore the model's capability to handle complex segmentation tasks with enhanced precision and boundary accuracy, making it a robust solution for medical imaging applications. + +# III. CONCLUSION + +We proposed Attention GhostUNet++, a novel deep learning architecture for segmenting SAT, VAT, and liver regions in CT images. By integrating Channel, Spatial, and Depth Attention mechanisms into Ghost-Net bottleneck layers, the model achieves enhanced feature refinement and contextual understanding with computational efficiency. + +Experiments on AATTCT-IDS[10] and LiTS[11] datasets demonstrated state-of-the-art performance, with DCs of 0.9430 (VAT), 0.9639 (SAT), and 0.9652 (liver). Visual comparisons highlighted the model's ability to accurately segment complex anatomical structures, reducing boundary errors and outperforming baseline models. + +This automated solution reduces reliance on manual annotations and enhances scalability for clinical and research applications. 
Future work will focus on addressing limitations, extending to multi-class tasks, and validating across diverse datasets, paving the way for efficient and accurate medical imaging tools. + +# LIMITATIONS AND FUTURE WORK + +The proposed Attention GhostUNet++ model faces minor limitations in handling fine-grained boundaries, particularly for SAT segmentation, and its generalizability to diverse imaging modalities remains untested. It also requires further evaluation on datasets with significant variations in anatomical structures and imaging quality. Future work will focus on enhancing boundary detection, expanding validation to multi-class tasks and diverse datasets, and optimizing computational efficiency for clinical deployment. Additionally, integrating advanced techniques, such as edge-aware attention, could improve performance in challenging regions. Extending the model to segment complex pathological structures will further enhance its utility in diagnostic and therapeutic applications. + +# REFERENCES + +[1] Mokdad, Ali H., Earl S. Ford, Barbara A. Bowman, William H. Dietz, Frank Vinicor, Virginia S. Bales, and James S. Marks. "Prevalence of obesity, diabetes, and obesity-related health risk factors, 2001." Jama 289, no. 1 (2003): 76-79. + +[2] Kaess, Bernhard M., Jacek Jozwiak, Miroslaw Mastej, Witold Lukas, Wladyslaw Grzeseczak, Adam Windak, Wieslawa Piwowarska et al. "Association between anthropometric obesity measures and coronary artery disease: a cross-sectional survey of 16 657 subjects from 444 Polish cities." Heart 96, no. 2 (2010): 131-135. +[3] Kullberg, Joel, Anders Hedstrom, John Brandberg, Robin Strand, Lars Johansson, Goran Bergstrom, and Håkan Ahlström. "Automated analysis of liver fat, muscle and adipose tissue distribution from CT suitable for large-scale studies." Scientific reports 7, no. 1 (2017): 10425. +[4] Tanaka, Muhei, Hiroshi Okada, Yoshitaka Hashimoto, Muneaki Kumagai, Hiromi Nishimura, and Michiaki Fukui. 
"Distinct associations of intraperitoneal and retroperitoneal visceral adipose tissues with metabolic syndrome and its components." Clinical Nutrition 40, no. 5 (2021): 3479-3484.. +[5] Tanaka, M., Okada, H., Hashimoto, Y., Kumagai, M., Nishimura, H., & Fukui, M. (2020). Intraperitoneal, but not retroperitoneal, visceral adipose tissue is associated with diabetes mellitus: a cross-sectional, retrospective pilot analysis. Diabetology & Metabolic Syndrome, 12, 1-10. +[6] Christen, T., Sheikine, Y., Rocha, V. Z., Hurwitz, S., Goldfine, A. B., Di Carli, M., & Libby, P. (2010). Increased glucose uptake in visceral versus subcutaneous adipose tissue revealed by PET imaging. JACC: Cardiovascular Imaging, 3(8), 843-851. +[7] Kelley, D. E., Thaete, F. L., Troost, F., Huwe, T., & Goodpaster, B. H. (2000). Subdivisions of subcutaneous abdominal adipose tissue and insulin resistance. American Journal of Physiology-Endocrinology and Metabolism, 278(5), E941-E948. +[8] Smith, S. R., Lovejoy, J. C., Greenway, F., Ryan, D., deJonge, L., de la Bretonne, J., ... & Bray, G. A. (2001). Contributions of total body fat, abdominal subcutaneous adipose tissue compartments, and visceral adipose tissue to the metabolic complications of obesity. Metabolism-Clinical and Experimental, 50(4), 425-435. +[9] Kazerouni, I. A., Dooly, G., & Toal, D. (2021). Ghost-UNet: an asymmetric encoder-decoder architecture for semantic segmentation from scratch. IEEE Access, 9, 97457-97465. +[10] Ma, Z., Li, C., Du, T., Zhang, L., Tang, D., Ma, D., ... & Sun, H. (2024). AATCT-IDS: A benchmark Abdominal Adipose Tissue CT Image Dataset for image denoising, semantic segmentation, and radiomics evaluation. Computers in Biology and Medicine, 177, 108628. +[11] Bilic, P., Christ, P., Li, H.B., Vorontsov, E., Ben-Cohen, A., Kaissis, G., Szeskin, A., Jacobs, C., Mamani, G.E.H., Chartrand, G. and Lohofer, F., 2023. The liver tumor segmentation benchmark (lits). Medical Image Analysis, 84, p.102680. 
+[12] Ronneberger, O., Fischer, P., & Brox, T. (2015). U-net: Convolutional networks for biomedical image segmentation. In Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference, Munich, Germany, October 5-9, 2015, proceedings, part III 18 (pp. 234-241). Springer International Publishing. +[13] Zhou, Z., Rahman Siddiquee, M. M., Tajbakhsh, N., & Liang, J. (2018). Unet++: A nested u-net architecture for medical image segmentation. In Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support: 4th International Workshop, DLMIA 2018, and 8th International Workshop, ML-CDS 2018, Held in Conjunction with MICCAI 2018, Granada, Spain, September 20, 2018, Proceedings 4 (pp. 3-11). Springer International Publishing. +[14] Rahman, H., Bukht, T. F. N., Imran, A., Tariq, J., Tu, S., & Alzahrani, A. (2022). A deep learning approach for liver and tumor segmentation in CT images using ResUNet. Bioengineering, 9(8), 368. +[15] Ahmad, N., Strand, R., Sparresäter, B., Tarai, S., Lundström, E., Bergström, G., Ahlström, H. and Kullberg, J., 2023. Automatic segmentation of large-scale CT image datasets for detailed body composition analysis. BMC bioinformatics, 24(1), p.346. +[16] Bock, S. and Weiß, M., 2019, July. A proof of local convergence for the Adam optimizer. In 2019 international joint conference on neural networks (IJCNN) (pp. 1-8). IEEE. +[17] Hayat, Mansoor. "Squeeze & Excitation joint with Combined Channel and Spatial Attention for Pathology Image Super-Resolution." Franklin Open 8 (2024): 100170. 
\ No newline at end of file diff --git a/data/2025/2504_11xxx/2504.11491/images/2ab96b9f4d0721ad90482187eeaf501236b56a8f77b60c04fd63f3e0d9a002bd.jpg b/data/2025/2504_11xxx/2504.11491/images/2ab96b9f4d0721ad90482187eeaf501236b56a8f77b60c04fd63f3e0d9a002bd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ac30ee75386237d67206a8f8ba47993bc6b10998 --- /dev/null +++ b/data/2025/2504_11xxx/2504.11491/images/2ab96b9f4d0721ad90482187eeaf501236b56a8f77b60c04fd63f3e0d9a002bd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7059f4d783589e2965c32a7fb29e779662ba10f8481a9a0e5db9e13221efcb9c +size 2828 diff --git a/data/2025/2504_11xxx/2504.11491/images/4a7090403da2f1c1ed4b90cfa9ff6b50888ee9966f9cdf359f14f31e43b2f63a.jpg b/data/2025/2504_11xxx/2504.11491/images/4a7090403da2f1c1ed4b90cfa9ff6b50888ee9966f9cdf359f14f31e43b2f63a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c4462bbbeed64d2d808cfd2affafe7c7ce99c77d --- /dev/null +++ b/data/2025/2504_11xxx/2504.11491/images/4a7090403da2f1c1ed4b90cfa9ff6b50888ee9966f9cdf359f14f31e43b2f63a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f53e0712587805026abbfa65908c6f73c57f9ed09709e088a33f5ae9b990c811 +size 2751 diff --git a/data/2025/2504_11xxx/2504.11491/images/4f954950bdb0f6a41bad3466a3dc840568cd98b44fdf8b38c9c39ce11267a15f.jpg b/data/2025/2504_11xxx/2504.11491/images/4f954950bdb0f6a41bad3466a3dc840568cd98b44fdf8b38c9c39ce11267a15f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d3d935f71efd9da1b890ce31abda73f6e19f784 --- /dev/null +++ b/data/2025/2504_11xxx/2504.11491/images/4f954950bdb0f6a41bad3466a3dc840568cd98b44fdf8b38c9c39ce11267a15f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d12e06c70abc4a28f68583275ff8af664c0e0f46ad49df487dc38e383ab16289 +size 4479 diff --git a/data/2025/2504_11xxx/2504.11491/images/5079be29d2c99a728fa0fdb107927d0284c0f855bf5a5d187f005a1314ab8877.jpg 
b/data/2025/2504_11xxx/2504.11491/images/5079be29d2c99a728fa0fdb107927d0284c0f855bf5a5d187f005a1314ab8877.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7dbd90c550a03cb5ea16b763737b0fea09615624 --- /dev/null +++ b/data/2025/2504_11xxx/2504.11491/images/5079be29d2c99a728fa0fdb107927d0284c0f855bf5a5d187f005a1314ab8877.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c283736eabc3ac9a0b77fa4f84ad405cc86227c645da800aa20cfb574912c981 +size 85094 diff --git a/data/2025/2504_11xxx/2504.11491/images/7b5c57a797698a60272f63df9d03e910ea3876702b26ae0df2950ec47105d257.jpg b/data/2025/2504_11xxx/2504.11491/images/7b5c57a797698a60272f63df9d03e910ea3876702b26ae0df2950ec47105d257.jpg new file mode 100644 index 0000000000000000000000000000000000000000..642131af7b6202a5ba8e0028620db28dae9e85d7 --- /dev/null +++ b/data/2025/2504_11xxx/2504.11491/images/7b5c57a797698a60272f63df9d03e910ea3876702b26ae0df2950ec47105d257.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e7017e75c9d05e06db4d39fd2c5a482b8ce0115350e5f8c52aa7020b0895c40 +size 125979 diff --git a/data/2025/2504_11xxx/2504.11491/images/8b54563372ae0a87b1fcfb1beb4c93035e982aa37ee045b35ff8c2c619ecf378.jpg b/data/2025/2504_11xxx/2504.11491/images/8b54563372ae0a87b1fcfb1beb4c93035e982aa37ee045b35ff8c2c619ecf378.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c925ce5944bb3d968ca04ce3f2ebea4acf3ac9cf --- /dev/null +++ b/data/2025/2504_11xxx/2504.11491/images/8b54563372ae0a87b1fcfb1beb4c93035e982aa37ee045b35ff8c2c619ecf378.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:719cdd53576680f0bbbfa9359062050eac3d199abef84f8bb3fb95f616922f69 +size 8284 diff --git a/data/2025/2504_11xxx/2504.11491/images/c874966e39c3429c376f6207375e6f36f90340ce39b6090ca28d05035063b76a.jpg b/data/2025/2504_11xxx/2504.11491/images/c874966e39c3429c376f6207375e6f36f90340ce39b6090ca28d05035063b76a.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..4b81ddfe1739e0cfef52d6ffc2000ce0a2dc2c9c --- /dev/null +++ b/data/2025/2504_11xxx/2504.11491/images/c874966e39c3429c376f6207375e6f36f90340ce39b6090ca28d05035063b76a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2edc62dda71b00c58a993067378b357c9232a422662e54ac57069dfcea193057 +size 55640 diff --git a/data/2025/2504_11xxx/2504.11491/images/e7d8015d06805f56549baf3b54838bdac7bae89f6ec5be0ae5b12c446878b459.jpg b/data/2025/2504_11xxx/2504.11491/images/e7d8015d06805f56549baf3b54838bdac7bae89f6ec5be0ae5b12c446878b459.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3653f9c42bd0ac72ff906901f42ec6d2bca72e47 --- /dev/null +++ b/data/2025/2504_11xxx/2504.11491/images/e7d8015d06805f56549baf3b54838bdac7bae89f6ec5be0ae5b12c446878b459.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f449a49382e7fa73b52c2bc5ac2835775572be1536e4e646424c7b27e2d25c5c +size 4421 diff --git a/data/2025/2504_11xxx/2504.11491/images/ec8d5f5467f110fe1d29c51943549b77706402d48a56da7f3330d093bd429de1.jpg b/data/2025/2504_11xxx/2504.11491/images/ec8d5f5467f110fe1d29c51943549b77706402d48a56da7f3330d093bd429de1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..23b3bbc19db1a0c188ee6430ad5b5f6029e34251 --- /dev/null +++ b/data/2025/2504_11xxx/2504.11491/images/ec8d5f5467f110fe1d29c51943549b77706402d48a56da7f3330d093bd429de1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08af8ec93aac8b2a1fb082cdc76e45202d2507f99e04cf5eaaa31843b3f98ed9 +size 2819 diff --git a/data/2025/2504_11xxx/2504.11491/images/ef90033573bf128037444d4ea977ce7d72c28d61f449c72254483e43e9098d1e.jpg b/data/2025/2504_11xxx/2504.11491/images/ef90033573bf128037444d4ea977ce7d72c28d61f449c72254483e43e9098d1e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f3eeacf825ef9c8f32ba4c681aff856b299ec7d --- /dev/null +++ 
b/data/2025/2504_11xxx/2504.11491/images/ef90033573bf128037444d4ea977ce7d72c28d61f449c72254483e43e9098d1e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23cd8929cb0d25e3bf3158ba6b7454f4f18a9cc3bd28cbe81152c23a63e6bc22 +size 8704 diff --git a/data/2025/2504_11xxx/2504.11491/layout.json b/data/2025/2504_11xxx/2504.11491/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..df32de60b68282470d4c8f6ddc757a89e811c373 --- /dev/null +++ b/data/2025/2504_11xxx/2504.11491/layout.json @@ -0,0 +1,3841 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 52, + 69, + 558, + 109 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 69, + 558, + 109 + ], + "spans": [ + { + "bbox": [ + 52, + 69, + 558, + 109 + ], + "type": "text", + "content": "Attention GhostUNet++: Enhanced Segmentation of Adipose Tissue and Liver in CT Images" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 125, + 499, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 125, + 499, + 139 + ], + "spans": [ + { + "bbox": [ + 107, + 125, + 499, + 139 + ], + "type": "text", + "content": "Mansoor Hayat1, Supavadee Aramvith2, Subrata Bhattacharjee3 and Nouman Ahmad4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 160, + 299, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 160, + 299, + 351 + ], + "spans": [ + { + "bbox": [ + 50, + 160, + 299, + 351 + ], + "type": "text", + "content": "Abstract—Accurate segmentation of abdominal adipose tissue, including subcutaneous (SAT) and visceral adipose tissue (VAT), along with liver segmentation, is essential for understanding body composition and associated health risks such as type 2 diabetes and cardiovascular disease. 
This study proposes Attention GhostUNet++, a novel deep learning model incorporating Channel, Spatial, and Depth Attention mechanisms into the Ghost UNet++ bottleneck for automated, precise segmentation. Evaluated on the AATTCT-IDS and LiTS datasets, the model achieved Dice coefficients of 0.9430 for VAT, 0.9639 for SAT, and 0.9652 for liver segmentation, surpassing baseline models. Despite minor limitations in boundary detail segmentation, the proposed model significantly enhances feature refinement, contextual understanding, and computational efficiency, offering a robust solution for body composition analysis. The implementation of the proposed Attention GhostUNet++ model is available at: https://github.com/MansoorHayat777/Attention-GhostUNetPlusPlus." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 360, + 299, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 360, + 299, + 462 + ], + "spans": [ + { + "bbox": [ + 50, + 360, + 299, + 462 + ], + "type": "text", + "content": "Clinical relevance—The Attention GhostUNet++ model offers a significant advancement in the automated segmentation of adipose tissue and liver regions from CT images. Accurate delineation of visceral and subcutaneous adipose tissue, alongside liver structures, is critical for clinicians managing cardiometabolic disorders, including type 2 diabetes and cardiovascular diseases. By reducing reliance on manual annotations, the model enhances efficiency and scalability, paving the way for its integration into routine clinical workflows and large-scale body composition studies." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 134, + 472, + 216, + 482 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 472, + 216, + 482 + ], + "spans": [ + { + "bbox": [ + 134, + 472, + 216, + 482 + ], + "type": "text", + "content": "I. 
INTRODUCTION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 488, + 299, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 488, + 299, + 620 + ], + "spans": [ + { + "bbox": [ + 50, + 488, + 299, + 620 + ], + "type": "text", + "content": "Obesity is a significant risk factor for cardiometabolic diseases, including type 2 diabetes (T2D), cardiovascular disease (CVD), non-alcoholic fatty liver disease, and hypertension [1] [2]. Body composition (BC) analysis focuses on the distribution of fatty and non-fatty tissues, particularly in depots such as adipose tissue, muscle, liver, and bone, playing a crucial role in predicting and preventing these diseases [3]. Among adipose tissue compartments, visceral adipose tissue (VAT) and subcutaneous adipose tissue (SAT) are key. VAT, located within the abdominal cavity [4], [5]. Similarly, SAT, located beneath the skin [3] [6], [7], [8], [17]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 620, + 299, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 620, + 299, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 620, + 299, + 704 + ], + "type": "text", + "content": "This study introduces Attention GhostUNet++, an advanced deep learning model that incorporates Channel, Spatial, and Depth Attention mechanisms into the Ghost UNet++ bottleneck for automated segmentation of VAT, SAT, and liver regions from CT images. Using the AATTCT-IDS [10] and LiTS [11] datasets, the model achieved Dice coefficients of 0.9430 (VAT), 0.9639 (SAT), and 0.9652 (liver)," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 714, + 299, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 714, + 299, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 714, + 299, + 734 + ], + "type": "text", + "content": "This research is funded by Thailand Science Research and Innovation Fund Chulalongkorn University (IND_FF_68_280_2100_039)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 159, + 558, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 159, + 558, + 207 + ], + "spans": [ + { + "bbox": [ + 309, + 159, + 558, + 207 + ], + "type": "text", + "content": "outperforming baseline models in most metrics. However, challenges remain in SAT and VAT boundary segmentation, where Jaccard indices of 0.9639 (SAT) and 0.9430 (VAT) fell short of UNet's performance in specific cases." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 309, + 208, + 558, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 208, + 558, + 280 + ], + "spans": [ + { + "bbox": [ + 309, + 208, + 558, + 280 + ], + "type": "text", + "content": "The proposed model reduces reliance on manual annotation, providing an efficient, scalable solution for BC analysis. It addresses challenges in large-scale imaging studies, paving the way for broader applications in multi-class segmentation and personalized healthcare, with future work focusing on boundary refinement and dataset generalization." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 398, + 286, + 470, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 398, + 286, + 470, + 297 + ], + "spans": [ + { + "bbox": [ + 398, + 286, + 470, + 297 + ], + "type": "text", + "content": "METHODOLOGY" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 310, + 301, + 372, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 301, + 372, + 312 + ], + "spans": [ + { + "bbox": [ + 310, + 301, + 372, + 312 + ], + "type": "text", + "content": "Ghost Module" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 316, + 558, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 316, + 558, + 424 + ], + "spans": [ + { + "bbox": [ + 309, + 316, + 558, + 424 + ], + "type": "text", + "content": "The Ghost-UNet [9] architecture is designed to enhance computational efficiency by minimizing redundancy in feature maps. Traditional convolution operations are computationally intensive for generating high-dimensional feature maps. The Ghost module addresses this limitation by employing lightweight operations to generate ghost feature maps, which are lower-resolution representations of the input features. This significantly improves efficiency without sacrificing accuracy." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 320, + 424, + 531, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 424, + 531, + 436 + ], + "spans": [ + { + "bbox": [ + 320, + 424, + 531, + 436 + ], + "type": "text", + "content": "The Ghost module is mathematically expressed as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 392, + 442, + 474, + 456 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 442, + 474, + 456 + ], + "spans": [ + { + "bbox": [ + 392, + 442, + 474, + 456 + ], + "type": "interline_equation", + "content": "\\mathbf {F} _ {o} = \\gamma (\\mathbf {W} * \\mathbf {F} _ {i}) + \\beta", + "image_path": "ec8d5f5467f110fe1d29c51943549b77706402d48a56da7f3330d093bd429de1.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 310, + 462, + 340, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 462, + 340, + 472 + ], + "spans": [ + { + "bbox": [ + 310, + 462, + 340, + 472 + ], + "type": "text", + "content": "where:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 320, + 475, + 533, + 535 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 320, + 475, + 496, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 475, + 496, + 487 + ], + "spans": [ + { + "bbox": [ + 320, + 475, + 496, + 487 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 320, + 475, + 496, + 487 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_o" + }, + { + "bbox": [ + 320, + 475, + 496, + 487 + ], + "type": "text", + "content": ": High-resolution output feature map," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 320, + 488, + 488, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 488, + 488, + 499 + ], + "spans": [ + { + "bbox": [ + 320, + 488, + 488, + 499 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 320, + 488, + 488, + 499 
+ ], + "type": "inline_equation", + "content": "\\mathbf{F}_i" + }, + { + "bbox": [ + 320, + 488, + 488, + 499 + ], + "type": "text", + "content": ": Low-resolution ghost feature map," + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 320, + 500, + 533, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 500, + 533, + 511 + ], + "spans": [ + { + "bbox": [ + 320, + 500, + 533, + 511 + ], + "type": "text", + "content": "- W: Weight tensor of the ghost convolution layer," + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 320, + 512, + 524, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 512, + 524, + 524 + ], + "spans": [ + { + "bbox": [ + 320, + 512, + 524, + 524 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 320, + 512, + 524, + 524 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 320, + 512, + 524, + 524 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 320, + 512, + 524, + 524 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 320, + 512, + 524, + 524 + ], + "type": "text", + "content": ": Learnable scale and shift parameters," + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 320, + 525, + 462, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 525, + 462, + 535 + ], + "spans": [ + { + "bbox": [ + 320, + 525, + 462, + 535 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 320, + 525, + 462, + 535 + ], + "type": "text", + "content": " Ghost convolution operation." 
+ } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 320, + 537, + 543, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 537, + 543, + 549 + ], + "spans": [ + { + "bbox": [ + 320, + 537, + 543, + 549 + ], + "type": "text", + "content": "The ghost convolution operation is further defined as:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 374, + 554, + 494, + 576 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 374, + 554, + 494, + 576 + ], + "spans": [ + { + "bbox": [ + 374, + 554, + 494, + 576 + ], + "type": "interline_equation", + "content": "\\mathbf {F} _ {i} [ m ] = \\sum_ {n} W [ n, m ] * \\mathbf {X} [ n ] + b", + "image_path": "e7d8015d06805f56549baf3b54838bdac7bae89f6ec5be0ae5b12c446878b459.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 309, + 580, + 558, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 580, + 558, + 604 + ], + "spans": [ + { + "bbox": [ + 309, + 580, + 558, + 604 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 309, + 580, + 558, + 604 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 309, + 580, + 558, + 604 + ], + "type": "text", + "content": " represents the ghost channels, " + }, + { + "bbox": [ + 309, + 580, + 558, + 604 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 309, + 580, + 558, + 604 + ], + "type": "text", + "content": " is the input feature map index, and " + }, + { + "bbox": [ + 309, + 580, + 558, + 604 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 309, + 580, + 558, + 604 + ], + "type": "text", + "content": " denotes the bias term." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 309, + 611, + 464, + 622 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 611, + 464, + 622 + ], + "spans": [ + { + "bbox": [ + 309, + 611, + 464, + 622 + ], + "type": "text", + "content": "Attention GhostUNet++ Architecture" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 309, + 625, + 558, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 625, + 558, + 734 + ], + "spans": [ + { + "bbox": [ + 309, + 625, + 558, + 734 + ], + "type": "text", + "content": "In the proposed Attention GhostUNet++, Ghost bottleneck layers are integrated into the UNet++ architecture, each enhanced with Channel, Spatial, and Depth Attention mechanisms. These mechanisms improve feature refinement by dynamically emphasizing relevant regions in the feature maps while suppressing redundant information. The network consists of 15 bottleneck layers arranged within a nested architecture that supports robust contraction and expansion paths:" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 207, + 36, + 568 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 207, + 36, + 568 + ], + "spans": [ + { + "bbox": [ + 14, + 207, + 36, + 568 + ], + "type": "text", + "content": "arXiv:2504.11491v1 [eess.IV] 14 Apr 2025" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 52, + 561, + 350 + ], + "blocks": [ + { + "bbox": [ + 60, + 52, + 561, + 350 + ], + "lines": [ + { + "bbox": [ + 60, + 52, + 561, + 350 + ], + "spans": [ + { + "bbox": [ + 60, + 52, + 561, + 350 + ], + "type": "image", + "image_path": "5079be29d2c99a728fa0fdb107927d0284c0f855bf5a5d187f005a1314ab8877.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 222, + 361, + 388, + 373 + ], + "lines": [ + { + "bbox": [ 
+ 222, + 361, + 388, + 373 + ], + "spans": [ + { + "bbox": [ + 222, + 361, + 388, + 373 + ], + "type": "text", + "content": "Fig. 1. Attention GhostUNet++ Architecture." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 60, + 395, + 298, + 441 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 60, + 395, + 298, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 395, + 298, + 417 + ], + "spans": [ + { + "bbox": [ + 60, + 395, + 298, + 417 + ], + "type": "text", + "content": "- Contraction Path: Extracts features at multiple resolutions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 60, + 419, + 298, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 419, + 298, + 441 + ], + "spans": [ + { + "bbox": [ + 60, + 419, + 298, + 441 + ], + "type": "text", + "content": "- Expansion Path: Reconstructs feature maps for precise segmentation." 
+ } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 445, + 299, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 445, + 299, + 466 + ], + "spans": [ + { + "bbox": [ + 50, + 445, + 299, + 466 + ], + "type": "text", + "content": "Each bottleneck layer in the network can be represented as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 135, + 468, + 215, + 481 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 468, + 215, + 481 + ], + "spans": [ + { + "bbox": [ + 135, + 468, + 215, + 481 + ], + "type": "interline_equation", + "content": "\\mathbf {F} _ {o} = \\mathcal {A} (\\mathcal {G} (\\mathbf {F} _ {i}, \\Theta))", + "image_path": "2ab96b9f4d0721ad90482187eeaf501236b56a8f77b60c04fd63f3e0d9a002bd.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 487, + 82, + 497 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 487, + 82, + 497 + ], + "spans": [ + { + "bbox": [ + 51, + 487, + 82, + 497 + ], + "type": "text", + "content": "where:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 61, + 501, + 201, + 559 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 61, + 501, + 140, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 501, + 140, + 512 + ], + "spans": [ + { + "bbox": [ + 61, + 501, + 140, + 512 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 61, + 501, + 140, + 512 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_i" + }, + { + "bbox": [ + 61, + 501, + 140, + 512 + ], + "type": "text", + "content": ": Input tensor," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 61, + 513, + 148, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 513, + 148, + 523 + ], + "spans": [ + { + "bbox": [ + 61, + 513, + 148, + 523 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 61, + 513, + 148, + 
523 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_o" + }, + { + "bbox": [ + 61, + 513, + 148, + 523 + ], + "type": "text", + "content": ": Output tensor," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 61, + 525, + 182, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 525, + 182, + 536 + ], + "spans": [ + { + "bbox": [ + 61, + 525, + 182, + 536 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 61, + 525, + 182, + 536 + ], + "type": "text", + "content": " : Ghost bottleneck layer," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 61, + 537, + 179, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 537, + 179, + 547 + ], + "spans": [ + { + "bbox": [ + 61, + 537, + 179, + 547 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 61, + 537, + 179, + 547 + ], + "type": "text", + "content": " : Attention mechanism," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 61, + 548, + 201, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 548, + 201, + 559 + ], + "spans": [ + { + "bbox": [ + 61, + 548, + 201, + 559 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 61, + 548, + 201, + 559 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 61, + 548, + 201, + 559 + ], + "type": "text", + "content": ": Set of learnable parameters." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 562, + 299, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 562, + 299, + 585 + ], + "spans": [ + { + "bbox": [ + 50, + 562, + 299, + 585 + ], + "type": "text", + "content": "At each hierarchical level " + }, + { + "bbox": [ + 50, + 562, + 299, + 585 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 50, + 562, + 299, + 585 + ], + "type": "text", + "content": " in the network, the feature maps are calculated as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 591, + 246, + 605 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 591, + 246, + 605 + ], + "spans": [ + { + "bbox": [ + 104, + 591, + 246, + 605 + ], + "type": "interline_equation", + "content": "\\mathbf {F} _ {l} = \\mathcal {P} (\\mathcal {G} (\\mathbf {F} _ {l - 1})) + \\mathcal {U} (\\mathcal {G} (\\mathbf {F} _ {l + 1}))", + "image_path": "4f954950bdb0f6a41bad3466a3dc840568cd98b44fdf8b38c9c39ce11267a15f.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 51, + 611, + 82, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 611, + 82, + 620 + ], + "spans": [ + { + "bbox": [ + 51, + 611, + 82, + 620 + ], + "type": "text", + "content": "where:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 61, + 624, + 186, + 658 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 61, + 624, + 166, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 624, + 166, + 635 + ], + "spans": [ + { + "bbox": [ + 61, + 624, + 166, + 635 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 61, + 624, + 166, + 635 + ], + "type": "text", + "content": " : Pooling operation," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 61, + 636, + 186, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 61, + 636, + 186, + 647 + ], + "spans": [ + { + "bbox": [ + 61, + 636, + 186, + 647 + ], + "type": "inline_equation", + "content": "\\mathcal{U}" + }, + { + "bbox": [ + 61, + 636, + 186, + 647 + ], + "type": "text", + "content": " : Up-sampling operation," + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 61, + 648, + 147, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 648, + 147, + 658 + ], + "spans": [ + { + "bbox": [ + 61, + 648, + 147, + 658 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 61, + 648, + 147, + 658 + ], + "type": "text", + "content": " : Ghost module." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 61, + 662, + 206, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 662, + 206, + 673 + ], + "spans": [ + { + "bbox": [ + 61, + 662, + 206, + 673 + ], + "type": "text", + "content": "The final output of the network is:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 120, + 680, + 230, + 693 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 680, + 230, + 693 + ], + "spans": [ + { + "bbox": [ + 120, + 680, + 230, + 693 + ], + "type": "interline_equation", + "content": "\\mathbf {F} _ {\\text {f i n a l}} = \\mathbf {F} _ {1} + \\mathbf {F} _ {2} + \\dots + \\mathbf {F} _ {n}", + "image_path": "4a7090403da2f1c1ed4b90cfa9ff6b50888ee9966f9cdf359f14f31e43b2f63a.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 50, + 698, + 299, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 698, + 299, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 698, + 299, + 734 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 698, + 299, + 734 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 50, + 698, + 299, + 734 + ], + "type": "text", + "content": " is the number of hierarchical levels in the 
architecture. This innovative integration of Ghost bottleneck layers with attention mechanisms enables Attention GhostUNet++" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 309, + 395, + 559, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 395, + 559, + 431 + ], + "spans": [ + { + "bbox": [ + 309, + 395, + 559, + 431 + ], + "type": "text", + "content": "to achieve high accuracy, computational efficiency, and improved feature refinement, making it a robust solution for medical image segmentation." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 370, + 442, + 499, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 442, + 499, + 453 + ], + "spans": [ + { + "bbox": [ + 370, + 442, + 499, + 453 + ], + "type": "text", + "content": "II. EXPERIMENTAL RESULTS" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 460, + 349, + 470 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 460, + 349, + 470 + ], + "spans": [ + { + "bbox": [ + 310, + 460, + 349, + 470 + ], + "type": "text", + "content": "Datasets" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 476, + 427, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 476, + 427, + 488 + ], + "spans": [ + { + "bbox": [ + 310, + 476, + 427, + 488 + ], + "type": "text", + "content": "Datasets and Preprocessing" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 309, + 493, + 559, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 493, + 559, + 564 + ], + "spans": [ + { + "bbox": [ + 309, + 493, + 559, + 564 + ], + "type": "text", + "content": "This study evaluates the proposed Attention GhostUNet++ model using two datasets: the Abdominal Adipose Tissue CT Image Dataset (AATTCT-IDS) [10] and the Liver Tumor Segmentation Benchmark (LiTS) [11]. These datasets provide annotated CT images for segmenting SAT, VAT, and liver regions." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 309, + 565, + 559, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 565, + 559, + 649 + ], + "spans": [ + { + "bbox": [ + 309, + 565, + 559, + 649 + ], + "type": "text", + "content": "The AATTCT-IDS dataset [10] includes 13,732 CT slices (3,213 annotated) from 300 subjects, focusing on SAT and VAT. Challenges arise from individual variability and overlapping boundaries between these compartments. The LiTS dataset [11] contains 201 CT volumes annotated for liver regions, featuring diverse liver shapes and pathologies. Liver segmentation was prioritized in this study." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 309, + 650, + 558, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 650, + 558, + 709 + ], + "spans": [ + { + "bbox": [ + 309, + 650, + 558, + 709 + ], + "type": "text", + "content": "Preprocessing included resizing CT slices to a consistent resolution, normalizing pixel intensities, and cropping volumes to focus on regions of interest. Data augmentation (e.g., rotations, flipping, scaling) was applied to enhance variability and reduce overfitting." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 309, + 710, + 559, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 710, + 559, + 734 + ], + "spans": [ + { + "bbox": [ + 309, + 710, + 559, + 734 + ], + "type": "text", + "content": "These datasets were critical for validating the robustness and generalizability of the Attention GhostUNet++ model" + } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 49, + 557, + 296 + ], + "blocks": [ + { + "bbox": [ + 55, + 49, + 557, + 296 + ], + "lines": [ + { + "bbox": [ + 55, + 49, + 557, + 296 + ], + "spans": [ + { + "bbox": [ + 55, + 49, + 557, + 296 + ], + "type": "image", + "image_path": "7b5c57a797698a60272f63df9d03e910ea3876702b26ae0df2950ec47105d257.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 308, + 560, + 338 + ], + "lines": [ + { + "bbox": [ + 50, + 308, + 560, + 338 + ], + "spans": [ + { + "bbox": [ + 50, + 308, + 560, + 338 + ], + "type": "text", + "content": "Fig. 2. Segmentation results for randomly selected CT image examples, the columns represent (1) the original CT image, (2) ground truth annotations, (3) model-predicted segmentation output, (4) mask differences between ground truth and predictions, (5) predicted segmentation masks overlaid on the original CT image." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 359, + 299, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 359, + 299, + 382 + ], + "spans": [ + { + "bbox": [ + 50, + 359, + 299, + 382 + ], + "type": "text", + "content": "in segmenting complex anatomical structures across varying scenarios." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 391, + 124, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 391, + 124, + 403 + ], + "spans": [ + { + "bbox": [ + 51, + 391, + 124, + 403 + ], + "type": "text", + "content": "Training Settings" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 406, + 299, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 406, + 299, + 489 + ], + "spans": [ + { + "bbox": [ + 50, + 406, + 299, + 489 + ], + "type": "text", + "content": "The proposed Attention GhostUNet++ model was implemented in PyTorch 2.0 and trained on an Nvidia 3090Ti GPU. Xavier initialization ensured effective weight scaling. The datasets were split into training, validation, and test sets (70:20:10), with data augmentation (rotations, flipping, scaling, and intensity variations) applied to improve generalization." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 490, + 299, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 490, + 299, + 561 + ], + "spans": [ + { + "bbox": [ + 50, + 490, + 299, + 561 + ], + "type": "text", + "content": "Training used the Adam optimizer [16] " + }, + { + "bbox": [ + 50, + 490, + 299, + 561 + ], + "type": "inline_equation", + "content": "(1\\times 10^{-4}" + }, + { + "bbox": [ + 50, + 490, + 299, + 561 + ], + "type": "text", + "content": " initial learning rate, cosine annealing decay) and a combined Dice and cross-entropy loss. Mini-batches of size 16 optimized GPU usage, while early stopping (100-epoch patience) prevented overfitting. The best-performing model was selected based on the validation Dice coefficient (DC)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 562, + 299, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 562, + 299, + 586 + ], + "spans": [ + { + "bbox": [ + 50, + 562, + 299, + 586 + ], + "type": "text", + "content": "Segmentation accuracy was evaluated using the DC and Jaccard index (JI):" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 590, + 267, + 617 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 590, + 267, + 617 + ], + "spans": [ + { + "bbox": [ + 83, + 590, + 267, + 617 + ], + "type": "interline_equation", + "content": "\\text {D i c e} = \\frac {2 | \\mathbf {P} \\cap \\mathbf {G} |}{| \\mathbf {P} | + | \\mathbf {G} |} = \\frac {2 \\sum_ {i} P _ {i} G _ {i}}{\\sum_ {i} P _ {i} + \\sum_ {i} G _ {i}}", + "image_path": "8b54563372ae0a87b1fcfb1beb4c93035e982aa37ee045b35ff8c2c619ecf378.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 72, + 620, + 277, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 620, + 277, + 647 + ], + "spans": [ + { + "bbox": [ + 72, + 620, + 277, + 647 + ], + "type": "interline_equation", + "content": "\\text {J a c c a r d I n d e x} = \\frac {\\left| \\mathbf {P} \\cap \\mathbf {G} \\right|}{\\left| \\mathbf {P} \\cup \\mathbf {G} \\right|} = \\frac {\\sum_ {i} P _ {i} G _ {i}}{\\sum_ {i} P _ {i} + \\sum_ {i} G _ {i} - \\sum_ {i} P _ {i} G _ {i}}", + "image_path": "ef90033573bf128037444d4ea977ce7d72c28d61f449c72254483e43e9098d1e.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 651, + 299, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 651, + 299, + 676 + ], + "spans": [ + { + "bbox": [ + 50, + 651, + 299, + 676 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 651, + 299, + 676 + ], + "type": "inline_equation", + "content": "P_{i}" + }, + { + "bbox": [ + 50, + 651, + 299, + 676 + ], + "type": "text", + "content": 
" and " + }, + { + "bbox": [ + 50, + 651, + 299, + 676 + ], + "type": "inline_equation", + "content": "G_{i}" + }, + { + "bbox": [ + 50, + 651, + 299, + 676 + ], + "type": "text", + "content": " represent the predicted and ground truth segmentation at pixel " + }, + { + "bbox": [ + 50, + 651, + 299, + 676 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 50, + 651, + 299, + 676 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 683, + 153, + 694 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 683, + 153, + 694 + ], + "spans": [ + { + "bbox": [ + 51, + 683, + 153, + 694 + ], + "type": "text", + "content": "Performance Evaluation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 698, + 299, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 698, + 299, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 698, + 299, + 734 + ], + "type": "text", + "content": "The performance of the proposed Attention GhostUNet++ model was compared against baseline models (UNet [12], UNet++ [13], ResUNet [14] and GhostUNet++" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 359, + 558, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 359, + 558, + 395 + ], + "spans": [ + { + "bbox": [ + 309, + 359, + 558, + 395 + ], + "type": "text", + "content": "[15] for VAT, SAT, and liver. Evaluation metrics included the DC and JI, which assess segmentation accuracy and overlap, respectively." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 396, + 559, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 396, + 559, + 503 + ], + "spans": [ + { + "bbox": [ + 309, + 396, + 559, + 503 + ], + "type": "text", + "content": "The proposed model outperformed the baseline models in most cases. 
As indicated in Table I, for VAT segmentation, it achieved a DC of 0.9430 and a JI of 0.9430, closely competing with UNet's JI of 0.9491. In SAT segmentation, it achieved a DC of 0.9639, matching UNet's JI of 0.9807 but indicating room for improvement in handling boundary details. For liver segmentation, Attention GhostUNet++ demonstrated superior performance with a Dice coefficient of 0.9652 and a JI of 0.9496, outperforming all baselines." + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 310, + 557, + 569, + 661 + ], + "blocks": [ + { + "bbox": [ + 315, + 514, + 553, + 548 + ], + "lines": [ + { + "bbox": [ + 315, + 514, + 553, + 548 + ], + "spans": [ + { + "bbox": [ + 315, + 514, + 553, + 548 + ], + "type": "text", + "content": "TABLEI MEAN SEGMENTATION DICE AND JACCARD SCORES FOR DIFFERENT TARGETS IN AATTCT-IDS [10] AND LITS [11] DATASETS" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 557, + 569, + 661 + ], + "lines": [ + { + "bbox": [ + 310, + 557, + 569, + 661 + ], + "spans": [ + { + "bbox": [ + 310, + 557, + 569, + 661 + ], + "type": "table", + "html": "
MethodMetricsVATSATLiver
UNet [12]Dice coefficient0.90570.96040.8746
Jaccard index0.94910.98070.8456
UNet++ [13]Dice coefficient0.87420.87410.9468
Jaccard index0.81570.86390.9311
ResUNet [14]Dice coefficient0.91840.94820.9587
Jaccard index0.90210.96530.9412
GhostUNet++ [15]Dice coefficient0.88470.89160.9554
Jaccard index0.79160.84510.9318
Attention GhostUNet++Dice coefficient0.94300.96390.9652
Jaccard index0.94300.96390.9496
", + "image_path": "c874966e39c3429c376f6207375e6f36f90340ce39b6090ca28d05035063b76a.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 309, + 674, + 559, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 674, + 559, + 734 + ], + "spans": [ + { + "bbox": [ + 309, + 674, + 559, + 734 + ], + "type": "text", + "content": "These results confirm the effectiveness of the Attention GhostUNet++ [15] model for medical image segmentation. While achieving state-of-the-art performance across most tasks, minor limitations in SAT segmentation suggest opportunities for further refinement to enhance boundary accuracy" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 53, + 167, + 64 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 53, + 167, + 64 + ], + "spans": [ + { + "bbox": [ + 51, + 53, + 167, + 64 + ], + "type": "text", + "content": "and overall generalizability." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 64, + 299, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 64, + 299, + 183 + ], + "spans": [ + { + "bbox": [ + 50, + 64, + 299, + 183 + ], + "type": "text", + "content": "Fig. 2 showcases example segmentation outputs from the test dataset. Ground truth annotations are displayed alongside predictions from each model. The Attention GhostUNet++ [15] model exhibits superior boundary adherence and accurate region segmentation, particularly in challenging cases where SAT and VAT boundaries overlap or are difficult to distinguish. Additionally, for liver segmentation, the proposed model effectively captures the organ's irregular contours, delivering results that clearly outperform those of the baseline models." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 184, + 299, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 184, + 299, + 233 + ], + "spans": [ + { + "bbox": [ + 50, + 184, + 299, + 233 + ], + "type": "text", + "content": "These visual comparisons underscore the model's capability to handle complex segmentation tasks with enhanced precision and boundary accuracy, making it a robust solution for medical imaging applications." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 135, + 242, + 216, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 242, + 216, + 253 + ], + "spans": [ + { + "bbox": [ + 135, + 242, + 216, + 253 + ], + "type": "text", + "content": "III. CONCLUSION" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 258, + 299, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 258, + 299, + 330 + ], + "spans": [ + { + "bbox": [ + 50, + 258, + 299, + 330 + ], + "type": "text", + "content": "We proposed Attention GhostUNet++, a novel deep learning architecture for segmenting SAT, VAT, and liver regions in CT images. By integrating Channel, Spatial, and Depth Attention mechanisms into Ghost-Net bottleneck layers, the model achieves enhanced feature refinement and contextual understanding with computational efficiency." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 331, + 299, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 331, + 299, + 402 + ], + "spans": [ + { + "bbox": [ + 50, + 331, + 299, + 402 + ], + "type": "text", + "content": "Experiments on AATTCT-IDS[10] and LiTS[11] datasets demonstrated state-of-the-art performance, with DCs of 0.9430 (VAT), 0.9639 (SAT), and 0.9652 (liver). Visual comparisons highlighted the model's ability to accurately segment complex anatomical structures, reducing boundary errors and outperforming baseline models." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 403, + 299, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 403, + 299, + 475 + ], + "spans": [ + { + "bbox": [ + 50, + 403, + 299, + 475 + ], + "type": "text", + "content": "This automated solution reduces reliance on manual annotations and enhances scalability for clinical and research applications. Future work will focus on addressing limitations, extending to multi-class tasks, and validating across diverse datasets, paving the way for efficient and accurate medical imaging tools." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 100, + 484, + 250, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 484, + 250, + 495 + ], + "spans": [ + { + "bbox": [ + 100, + 484, + 250, + 495 + ], + "type": "text", + "content": "LIMITATIONS AND FUTURE WORK" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 501, + 300, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 501, + 300, + 669 + ], + "spans": [ + { + "bbox": [ + 50, + 501, + 300, + 669 + ], + "type": "text", + "content": "The proposed Attention GhostUNet++ model faces minor limitations in handling fine-grained boundaries, particularly for SAT segmentation, and its generalizability to diverse imaging modalities remains untested. It also requires further evaluation on datasets with significant variations in anatomical structures and imaging quality. Future work will focus on enhancing boundary detection, expanding validation to multi-class tasks and diverse datasets, and optimizing computational efficiency for clinical deployment. Additionally, integrating advanced techniques, such as edge-aware attention, could improve performance in challenging regions. Extending the model to segment complex pathological structures will further enhance its utility in diagnostic and therapeutic applications." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 146, + 678, + 204, + 689 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 678, + 204, + 689 + ], + "spans": [ + { + "bbox": [ + 146, + 678, + 204, + 689 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 696, + 299, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 696, + 299, + 734 + ], + "spans": [ + { + "bbox": [ + 55, + 696, + 299, + 734 + ], + "type": "text", + "content": "[1] Mokdad, Ali H., Earl S. Ford, Barbara A. Bowman, William H. Dietz, Frank Vinicor, Virginia S. Bales, and James S. Marks. \"Prevalence of obesity, diabetes, and obesity-related health risk factors, 2001.\" Jama 289, no. 1 (2003): 76-79." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 312, + 53, + 559, + 683 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 316, + 53, + 558, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 53, + 558, + 99 + ], + "spans": [ + { + "bbox": [ + 316, + 53, + 558, + 99 + ], + "type": "text", + "content": "[2] Kaess, Bernhard M., Jacek Jozwiak, Miroslaw Mastej, Witold Lukas, Wladyslaw Grzeseczak, Adam Windak, Wieslawa Piwowarska et al. \"Association between anthropometric obesity measures and coronary artery disease: a cross-sectional survey of 16 657 subjects from 444 Polish cities.\" Heart 96, no. 2 (2010): 131-135." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 316, + 100, + 559, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 100, + 559, + 144 + ], + "spans": [ + { + "bbox": [ + 316, + 100, + 559, + 144 + ], + "type": "text", + "content": "[3] Kullberg, Joel, Anders Hedstrom, John Brandberg, Robin Strand, Lars Johansson, Goran Bergstrom, and Håkan Ahlström. 
\"Automated analysis of liver fat, muscle and adipose tissue distribution from CT suitable for large-scale studies.\" Scientific reports 7, no. 1 (2017): 10425." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 316, + 144, + 558, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 144, + 558, + 189 + ], + "spans": [ + { + "bbox": [ + 316, + 144, + 558, + 189 + ], + "type": "text", + "content": "[4] Tanaka, Muhei, Hiroshi Okada, Yoshitaka Hashimoto, Muneaki Kumagai, Hiromi Nishimura, and Michiaki Fukui. \"Distinct associations of intraperitoneal and retroperitoneal visceral adipose tissues with metabolic syndrome and its components.\" Clinical Nutrition 40, no. 5 (2021): 3479-3484.." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 189, + 558, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 189, + 558, + 233 + ], + "spans": [ + { + "bbox": [ + 316, + 189, + 558, + 233 + ], + "type": "text", + "content": "[5] Tanaka, M., Okada, H., Hashimoto, Y., Kumagai, M., Nishimura, H., & Fukui, M. (2020). Intraperitoneal, but not retroperitoneal, visceral adipose tissue is associated with diabetes mellitus: a cross-sectional, retrospective pilot analysis. Diabetology & Metabolic Syndrome, 12, 1-10." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 233, + 558, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 233, + 558, + 270 + ], + "spans": [ + { + "bbox": [ + 316, + 233, + 558, + 270 + ], + "type": "text", + "content": "[6] Christen, T., Sheikine, Y., Rocha, V. Z., Hurwitz, S., Goldfine, A. B., Di Carli, M., & Libby, P. (2010). Increased glucose uptake in visceral versus subcutaneous adipose tissue revealed by PET imaging. JACC: Cardiovascular Imaging, 3(8), 843-851." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 270, + 558, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 270, + 558, + 306 + ], + "spans": [ + { + "bbox": [ + 316, + 270, + 558, + 306 + ], + "type": "text", + "content": "[7] Kelley, D. E., Thaete, F. L., Troost, F., Huwe, T., & Goodpaster, B. H. (2000). Subdivisions of subcutaneous abdominal adipose tissue and insulin resistance. American Journal of Physiology-Endocrinology and Metabolism, 278(5), E941-E948." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 306, + 558, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 306, + 558, + 350 + ], + "spans": [ + { + "bbox": [ + 316, + 306, + 558, + 350 + ], + "type": "text", + "content": "[8] Smith, S. R., Lovejoy, J. C., Greenway, F., Ryan, D., deJonge, L., de la Bretonne, J., ... & Bray, G. A. (2001). Contributions of total body fat, abdominal subcutaneous adipose tissue compartments, and visceral adipose tissue to the metabolic complications of obesity. Metabolism-Clinical and Experimental, 50(4), 425-435." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 350, + 558, + 377 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 350, + 558, + 377 + ], + "spans": [ + { + "bbox": [ + 316, + 350, + 558, + 377 + ], + "type": "text", + "content": "[9] Kazerouni, I. A., Dooly, G., & Toal, D. (2021). Ghost-UNet: an asymmetric encoder-decoder architecture for semantic segmentation from scratch. IEEE Access, 9, 97457-97465." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 377, + 558, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 377, + 558, + 414 + ], + "spans": [ + { + "bbox": [ + 312, + 377, + 558, + 414 + ], + "type": "text", + "content": "[10] Ma, Z., Li, C., Du, T., Zhang, L., Tang, D., Ma, D., ... & Sun, H. (2024). 
AATCT-IDS: A benchmark Abdominal Adipose Tissue CT Image Dataset for image denoising, semantic segmentation, and radiomics evaluation. Computers in Biology and Medicine, 177, 108628." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 414, + 558, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 414, + 558, + 449 + ], + "spans": [ + { + "bbox": [ + 312, + 414, + 558, + 449 + ], + "type": "text", + "content": "[11] Bilic, P., Christ, P., Li, H.B., Vorontsov, E., Ben-Cohen, A., Kaissis, G., Szeskin, A., Jacobs, C., Mamani, G.E.H., Chartrand, G. and Lohofer, F., 2023. The liver tumor segmentation benchmark (lits). Medical Image Analysis, 84, p.102680." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 449, + 558, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 449, + 558, + 495 + ], + "spans": [ + { + "bbox": [ + 312, + 449, + 558, + 495 + ], + "type": "text", + "content": "[12] Ronneberger, O., Fischer, P., & Brox, T. (2015). U-net: Convolutional networks for biomedical image segmentation. In Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference, Munich, Germany, October 5-9, 2015, proceedings, part III 18 (pp. 234-241). Springer International Publishing." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 495, + 558, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 495, + 558, + 566 + ], + "spans": [ + { + "bbox": [ + 312, + 495, + 558, + 566 + ], + "type": "text", + "content": "[13] Zhou, Z., Rahman Siddiquee, M. M., Tajbakhsh, N., & Liang, J. (2018). Unet++: A nested u-net architecture for medical image segmentation. 
In Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support: 4th International Workshop, DLMIA 2018, and 8th International Workshop, ML-CDS 2018, Held in Conjunction with MICCAI 2018, Granada, Spain, September 20, 2018, Proceedings 4 (pp. 3-11). Springer International Publishing." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 566, + 558, + 593 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 566, + 558, + 593 + ], + "spans": [ + { + "bbox": [ + 312, + 566, + 558, + 593 + ], + "type": "text", + "content": "[14] Rahman, H., Bukht, T. F. N., Imran, A., Tariq, J., Tu, S., & Alzahrani, A. (2022). A deep learning approach for liver and tumor segmentation in CT images using ResUNet. Bioengineering, 9(8), 368." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 593, + 558, + 629 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 593, + 558, + 629 + ], + "spans": [ + { + "bbox": [ + 312, + 593, + 558, + 629 + ], + "type": "text", + "content": "[15] Ahmad, N., Strand, R., Sparresäter, B., Tarai, S., Lundström, E., Bergström, G., Ahlström, H. and Kullberg, J., 2023. Automatic segmentation of large-scale CT image datasets for detailed body composition analysis. BMC bioinformatics, 24(1), p.346." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 312, + 628, + 558, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 628, + 558, + 655 + ], + "spans": [ + { + "bbox": [ + 312, + 628, + 558, + 655 + ], + "type": "text", + "content": "[16] Bock, S. and Weiß, M., 2019, July. A proof of local convergence for the Adam optimizer. In 2019 international joint conference on neural networks (IJCNN) (pp. 1-8). IEEE." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 655, + 558, + 683 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 655, + 558, + 683 + ], + "spans": [ + { + "bbox": [ + 312, + 655, + 558, + 683 + ], + "type": "text", + "content": "[17] Hayat, Mansoor. \"Squeeze & Excitation joint with Combined Channel and Spatial Attention for Pathology Image Super-Resolution.\" Franklin Open 8 (2024): 100170." + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file